comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Since these are local tests, we should be able to reduce the polling interval and not require sleeping for 10 seconds. | public void waitUntilOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
return expected;
} else if (invocationCount[0] == 1) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
} else {
throw new RuntimeException("Poll should not be called more than twice");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
SUCCESSFULLY_COMPLETED));
assertEquals(expected.getValue().getResponse(), pollResponse.getValue().getResponse());
} | Thread.sleep(10000); | public void waitUntilOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
return expected;
} else if (invocationCount[0] == 1) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
} else {
throw new RuntimeException("Poll should not be called more than twice");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
SUCCESSFULLY_COMPLETED));
assertEquals(expected.getValue().getResponse(), pollResponse.getValue().getResponse());
} | class SimpleSyncPollerTests {
private static final Duration TEN_MILLIS = Duration.ofMillis(10);
@Test
public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
int[] activationCallCount = new int[1];
Function<PollingContext<Response>, PollResponse<Response>> activationOperationWithResponse = ignored -> {
activationCallCount[0]++;
return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("ActivationDone"));
};
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
throw new RuntimeException("Polling shouldn't happen for synchronously completed activation.");
};
SyncPoller<Response, CertificateOutput> syncPoller = new SimpleSyncPoller<>(TEN_MILLIS,
activationOperationWithResponse, pollOperation, (ignored1, ignore2) -> null, ignored -> null);
try {
PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
assertEquals(1, activationCallCount[0]);
} catch (Exception e) {
fail("SyncPoller did not complete on activation", e);
}
}
@Test
public void syncPollerConstructorPollIntervalZero() {
assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ZERO,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncPollerConstructorPollIntervalNegative() {
assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(-1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncPollerConstructorPollIntervalNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(null,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncConstructorActivationOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1), null,
ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncPollerConstructorPollOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null, (ignored1, ignored2) -> null,
ignored -> null));
}
@Test
public void syncPollerConstructorCancelOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, null,
ignored -> null));
}
@Test
public void syncPollerConstructorFetchResultOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, null));
}
@Test
public void syncPollerShouldCallActivationFromConstructor() {
boolean[] activationCalled = new boolean[1];
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response>
activationOperation = ignored -> {
activationCalled[0] = true;
return new Response("ActivationDone");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
ignored -> null, (ignored1, ignored2) -> null, ignored -> null);
Assertions.assertTrue(activationCalled[0]);
}
@Test
public void eachPollShouldReceiveLastPollResponse() {
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> new Response("A");
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = pollingContext -> {
Assertions.assertNotNull(pollingContext.getActivationResponse());
Assertions.assertNotNull(pollingContext.getLatestResponse());
PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
Assertions.assertNotNull(latestResponse);
return new PollResponse<>(IN_PROGRESS, new Response(latestResponse.getValue().toString() + "A"),
TEN_MILLIS);
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.poll();
Assertions.assertNotNull(pollResponse);
Assertions.assertNotNull(pollResponse.getValue().getResponse());
Assertions.assertTrue(pollResponse.getValue()
.getResponse()
.equalsIgnoreCase("Response: AA"));
pollResponse = poller.poll();
Assertions.assertNotNull(pollResponse);
Assertions.assertNotNull(pollResponse.getValue().getResponse());
Assertions.assertTrue(pollResponse.getValue()
.getResponse()
.equalsIgnoreCase("Response: Response: AAA"));
pollResponse = poller.poll();
Assertions.assertNotNull(pollResponse);
Assertions.assertNotNull(pollResponse.getValue().getResponse());
Assertions.assertTrue(pollResponse.getValue()
.getResponse()
.equalsIgnoreCase("Response: Response: Response: AAAA"));
}
@Test
public void waitForCompletionShouldReturnTerminalPollResponse() {
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
new Response("2"), TEN_MILLIS);
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return response2;
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitForCompletion();
Assertions.assertNotNull(pollResponse.getValue());
assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
assertEquals(response2.getValue().getResponse(), poller.waitForCompletion().getValue().getResponse());
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}
@Test
public void getResultShouldPollUntilCompletionAndFetchResult() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
new Response("2"), TEN_MILLIS);
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
= ignored -> new CertificateOutput("cert1");
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
CertificateOutput certificateOutput = poller.getFinalResult();
Assertions.assertNotNull(certificateOutput);
assertEquals("cert1", certificateOutput.getName());
assertEquals(2, invocationCount[0]);
}
@Test
public void getResultShouldNotPollOnCompletedPoller() {
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
new Response("2"), TEN_MILLIS);
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
= ignored -> new CertificateOutput("cert1");
int[] invocationCount = new int[] { -1 };
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return response2;
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
PollResponse<Response> pollResponse = poller.waitForCompletion();
Assertions.assertNotNull(pollResponse.getValue());
assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
CertificateOutput certificateOutput = poller.getFinalResult();
Assertions.assertNotNull(certificateOutput);
assertEquals("cert1", certificateOutput.getName());
}
@Test
public void waitUntilShouldPollAfterMatchingStatus() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
LongRunningOperationStatus matchStatus
= LongRunningOperationStatus.fromString("OTHER_1", false);
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return new PollResponse<>(matchStatus, new Response("1"), TEN_MILLIS);
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
assertEquals(matchStatus, pollResponse.getStatus());
assertEquals(2, invocationCount[0]);
}
@Test
public void verifyExceptionPropagationFromPollingOperationSyncPoller() {
final Response activationResponse = new Response("Foo");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
final AtomicReference<Integer> cnt = new AtomicReference<>(0);
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
cnt.getAndSet(cnt.get() + 1);
if (cnt.get() <= 2) {
return new PollResponse<>(IN_PROGRESS, new Response("1"));
} else if (cnt.get() == 3) {
throw new RuntimeException("Polling operation failed!");
} else if (cnt.get() == 4) {
return new PollResponse<>(IN_PROGRESS, new Response("2"));
} else {
return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"));
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult);
assertTrue(exception.getMessage().contains("Polling operation failed!"));
}
@Test
public void testPollerFluxError() throws InterruptedException {
IllegalArgumentException expectedException = new IllegalArgumentException();
PollerFlux<String, String> pollerFlux = error(expectedException);
CountDownLatch countDownLatch = new CountDownLatch(1);
pollerFlux.subscribe(
response -> Assertions.fail("Did not expect a response"),
ex -> {
countDownLatch.countDown();
Assertions.assertSame(expectedException, ex);
},
() -> Assertions.fail("Did not expect the flux to complete")
);
boolean completed = countDownLatch.await(1, TimeUnit.SECONDS);
Assertions.assertTrue(completed);
}
@Test
public void waitUntilShouldPollToCompletion() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
LongRunningOperationStatus matchStatus = SUCCESSFULLY_COMPLETED;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("2"), TEN_MILLIS);
default: throw new RuntimeException("Poll should not be called after matching response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
assertEquals(matchStatus, pollResponse.getStatus());
assertEquals(matchStatus, poller.waitUntil(matchStatus).getStatus());
assertEquals(2, invocationCount[0]);
}
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
* than the timeout period.
*/
@Test
public void waitForCompletionSinglePollTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
}
throw new RuntimeException("Poll should not be called more than once");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
() -> poller.waitForCompletion(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
* doesn't complete within the timeout period.
*/
@Test
public void waitForCompletionOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
} else if (invocationCount[0] == 1) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
} else {
throw new RuntimeException("Poll should not be called more than twice");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
() -> poller.waitForCompletion(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
* than the timeout period.
*/
@Test
public void waitUntilSinglePollTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<Response>, Response> activationOperation = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
}
throw new RuntimeException("Poll should not be called more than once");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse());
}
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
* doesn't reach the {@code statusToWaitFor} within the timeout period.
*/
@Test
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
* than the timeout period.
*/
@Test
public void getFinalResultSinglePollTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
}
throw new RuntimeException("Poll should not be called more than once");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
() -> poller.getFinalResult(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
* doesn't complete within the timeout period.
*/
@Test
public void getFinalResultOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
} else if (invocationCount[0] == 1) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
} else {
throw new RuntimeException("Poll should not be called more than twice");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
() -> poller.getFinalResult(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
public static class Response {
private final String response;
public Response(String response) {
this.response = response;
}
public String getResponse() {
return response;
}
@Override
public String toString() {
return "Response: " + response;
}
}
public static class CertificateOutput {
String name;
public CertificateOutput(String certName) {
name = certName;
}
public String getName() {
return name;
}
}
} | class SimpleSyncPollerTests {
private static final Duration TEN_MILLIS = Duration.ofMillis(10);
@Test
public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
int[] activationCallCount = new int[1];
Function<PollingContext<Response>, PollResponse<Response>> activationOperationWithResponse = ignored -> {
activationCallCount[0]++;
return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("ActivationDone"));
};
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
throw new RuntimeException("Polling shouldn't happen for synchronously completed activation.");
};
SyncPoller<Response, CertificateOutput> syncPoller = new SimpleSyncPoller<>(TEN_MILLIS,
activationOperationWithResponse, pollOperation, (ignored1, ignore2) -> null, ignored -> null);
try {
PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
assertEquals(1, activationCallCount[0]);
} catch (Exception e) {
fail("SyncPoller did not complete on activation", e);
}
}
@Test
public void syncPollerConstructorPollIntervalZero() {
assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ZERO,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncPollerConstructorPollIntervalNegative() {
assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(-1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncPollerConstructorPollIntervalNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(null,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncConstructorActivationOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1), null,
ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
}
@Test
public void syncPollerConstructorPollOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null, (ignored1, ignored2) -> null,
ignored -> null));
}
@Test
public void syncPollerConstructorCancelOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, null,
ignored -> null));
}
@Test
public void syncPollerConstructorFetchResultOperationNull() {
assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, null));
}
@Test
public void syncPollerShouldCallActivationFromConstructor() {
boolean[] activationCalled = new boolean[1];
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response>
activationOperation = ignored -> {
activationCalled[0] = true;
return new Response("ActivationDone");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
ignored -> null, (ignored1, ignored2) -> null, ignored -> null);
Assertions.assertTrue(activationCalled[0]);
}
@Test
public void eachPollShouldReceiveLastPollResponse() {
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> new Response("A");
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = pollingContext -> {
Assertions.assertNotNull(pollingContext.getActivationResponse());
Assertions.assertNotNull(pollingContext.getLatestResponse());
PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
Assertions.assertNotNull(latestResponse);
return new PollResponse<>(IN_PROGRESS, new Response(latestResponse.getValue().toString() + "A"),
TEN_MILLIS);
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.poll();
Assertions.assertNotNull(pollResponse);
Assertions.assertNotNull(pollResponse.getValue().getResponse());
Assertions.assertTrue(pollResponse.getValue()
.getResponse()
.equalsIgnoreCase("Response: AA"));
pollResponse = poller.poll();
Assertions.assertNotNull(pollResponse);
Assertions.assertNotNull(pollResponse.getValue().getResponse());
Assertions.assertTrue(pollResponse.getValue()
.getResponse()
.equalsIgnoreCase("Response: Response: AAA"));
pollResponse = poller.poll();
Assertions.assertNotNull(pollResponse);
Assertions.assertNotNull(pollResponse.getValue().getResponse());
Assertions.assertTrue(pollResponse.getValue()
.getResponse()
.equalsIgnoreCase("Response: Response: Response: AAAA"));
}
@Test
public void waitForCompletionShouldReturnTerminalPollResponse() {
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
new Response("2"), TEN_MILLIS);
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
= ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return response2;
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitForCompletion();
Assertions.assertNotNull(pollResponse.getValue());
assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
assertEquals(response2.getValue().getResponse(), poller.waitForCompletion().getValue().getResponse());
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}
/**
 * Verifies that {@code getFinalResult} polls until the operation completes (exactly three polls here, indices 0-2)
 * and then invokes the fetch-result operation to produce the final {@link CertificateOutput}.
 */
@Test
public void getResultShouldPollUntilCompletionAndFetchResult() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("2"), TEN_MILLIS);
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
    = ignored -> new CertificateOutput("cert1");
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
CertificateOutput certificateOutput = poller.getFinalResult();
Assertions.assertNotNull(certificateOutput);
assertEquals("cert1", certificateOutput.getName());
// Index 2 produced the terminal response, so no further polls may have happened.
assertEquals(2, invocationCount[0]);
}
/**
 * Verifies that calling {@code getFinalResult} on a poller that already reached a terminal state (via
 * {@code waitForCompletion}) only invokes the fetch-result operation and does not poll again
 * (the poll operation throws if invoked after the terminal response).
 */
@Test
public void getResultShouldNotPollOnCompletedPoller() {
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("2"), TEN_MILLIS);
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
    = ignored -> new CertificateOutput("cert1");
int[] invocationCount = new int[] { -1 };
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return response2;
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
PollResponse<Response> pollResponse = poller.waitForCompletion();
Assertions.assertNotNull(pollResponse.getValue());
assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
CertificateOutput certificateOutput = poller.getFinalResult();
Assertions.assertNotNull(certificateOutput);
assertEquals("cert1", certificateOutput.getName());
}
/**
 * Verifies that {@code waitUntil} stops at the first response whose status matches the requested (non-terminal)
 * custom status and does not poll beyond it (the poll operation throws on a fourth call).
 */
@Test
public void waitUntilShouldPollAfterMatchingStatus() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
LongRunningOperationStatus matchStatus
    = LongRunningOperationStatus.fromString("OTHER_1", false);
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return new PollResponse<>(matchStatus, new Response("1"), TEN_MILLIS);
default: throw new RuntimeException("Poll should not be called after terminal response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
assertEquals(matchStatus, pollResponse.getStatus());
// The matching status arrived on poll index 2, so exactly three polls were made.
assertEquals(2, invocationCount[0]);
}
/**
 * Verifies that a {@link RuntimeException} thrown by the poll operation (on the third poll here) propagates out of
 * {@code getFinalResult} rather than being swallowed.
 */
@Test
public void verifyExceptionPropagationFromPollingOperationSyncPoller() {
final Response activationResponse = new Response("Foo");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
// Single-threaded counter; AtomicReference is used only as a mutable int holder here.
final AtomicReference<Integer> cnt = new AtomicReference<>(0);
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
cnt.getAndSet(cnt.get() + 1);
if (cnt.get() <= 2) {
return new PollResponse<>(IN_PROGRESS, new Response("1"));
} else if (cnt.get() == 3) {
// This failure must surface to the caller; the later branches should never be reached by getFinalResult.
throw new RuntimeException("Polling operation failed!");
} else if (cnt.get() == 4) {
return new PollResponse<>(IN_PROGRESS, new Response("2"));
} else {
return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"));
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult);
assertTrue(exception.getMessage().contains("Polling operation failed!"));
}
/**
 * Verifies that a {@code PollerFlux} created via {@code error(...)} emits exactly the supplied exception to
 * subscribers — no poll responses and no completion signal.
 */
@Test
public void testPollerFluxError() throws InterruptedException {
IllegalArgumentException expectedException = new IllegalArgumentException();
PollerFlux<String, String> pollerFlux = error(expectedException);
CountDownLatch countDownLatch = new CountDownLatch(1);
pollerFlux.subscribe(
    response -> Assertions.fail("Did not expect a response"),
    ex -> {
    countDownLatch.countDown();
    // The very same instance must be delivered, not a wrapped copy.
    Assertions.assertSame(expectedException, ex);
    },
    () -> Assertions.fail("Did not expect the flux to complete")
);
boolean completed = countDownLatch.await(1, TimeUnit.SECONDS);
Assertions.assertTrue(completed);
}
/**
 * Verifies that {@code waitUntil} with a terminal status polls until that status is reached, and that a repeated
 * {@code waitUntil} call returns the cached result without polling again (the poll operation throws on a fourth call).
 */
@Test
public void waitUntilShouldPollToCompletion() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
LongRunningOperationStatus matchStatus = SUCCESSFULLY_COMPLETED;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
switch (invocationCount[0]) {
case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
case 2: return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("2"), TEN_MILLIS);
default: throw new RuntimeException("Poll should not be called after matching response");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
assertEquals(matchStatus, pollResponse.getStatus());
// The second waitUntil must not poll again — invocationCount stays at 2.
assertEquals(matchStatus, poller.waitUntil(matchStatus).getStatus());
assertEquals(2, invocationCount[0]);
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void waitForCompletionSinglePollTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
try {
// Block well past the 100ms waitForCompletion timeout below so the single in-flight poll times out.
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
}
throw new RuntimeException("Poll should not be called more than once");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
    () -> poller.waitForCompletion(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
 * doesn't complete within the timeout period.
 */
@Test
public void waitForCompletionOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
// First poll returns promptly; the operation is still in progress.
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
} else if (invocationCount[0] == 1) {
try {
// Second poll blocks past the 100ms timeout below, so the overall operation times out.
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
} else {
throw new RuntimeException("Poll should not be called more than twice");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
    () -> poller.waitForCompletion(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
 * Tests that {@code waitUntil} does NOT throw when a single poll takes longer than the timeout period; instead the
 * last known response is returned — here the activation response, since no poll completed before the timeout.
 * (The original javadoc claimed an exception is thrown, but the test asserts the returned value.)
 */
@Test
public void waitUntilSinglePollTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<Response>, Response> activationOperation = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
try {
// Block well past the 100ms waitUntil timeout below so no poll response is ever observed.
Thread.sleep(2000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
}
throw new RuntimeException("Poll should not be called more than once");
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse());
}
/**
 * Tests that {@code waitUntil} does not throw if the polling operation doesn't reach the {@code statusToWaitFor}
 * within the timeout period; instead the last response received before the timeout is returned.
 */
@Test
public void waitUntilOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return expected;
        } else if (invocationCount[0] == 1) {
            try {
                // Block just past the 1000ms waitUntil timeout below. These are local tests, so the second poll
                // only needs to outlive the timeout; sleeping 10 seconds would slow the suite with no extra coverage.
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
        } else {
            throw new RuntimeException("Poll should not be called more than twice");
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    // The second poll is still in flight when the timeout elapses, so the first response is the last known one.
    PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
        SUCCESSFULLY_COMPLETED));
    assertEquals(expected.getValue().getResponse(), pollResponse.getValue().getResponse());
}

/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void getFinalResultSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            try {
                // Block well past the 100ms getFinalResult timeout below so the single in-flight poll times out.
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        }
        throw new RuntimeException("Poll should not be called more than once");
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
 * doesn't complete within the timeout period.
 */
@Test
public void getFinalResultOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
// First poll returns promptly; the operation is still in progress.
return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
} else if (invocationCount[0] == 1) {
try {
// Second poll blocks past the 100ms timeout below, so getFinalResult times out overall.
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
} else {
throw new RuntimeException("Poll should not be called more than twice");
}
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
RuntimeException exception = assertThrows(RuntimeException.class,
    () -> poller.getFinalResult(Duration.ofMillis(100)));
assertInstanceOf(TimeoutException.class, exception.getCause());
}
/** Immutable string payload used as the poll-response value type in these tests. */
public static class Response {
    private final String response;

    /**
     * Creates a Response carrying the given string.
     *
     * @param response the string payload to wrap
     */
    public Response(String response) {
        this.response = response;
    }

    /**
     * Gets the wrapped string payload.
     *
     * @return the string supplied at construction
     */
    public String getResponse() {
        return this.response;
    }

    @Override
    public String toString() {
        // Same rendering as plain concatenation "Response: " + response (including null -> "null").
        return String.format("Response: %s", this.response);
    }
}
/** Simple holder for the final long-running-operation result used in these tests. */
public static class CertificateOutput {
    // Kept package-private (as originally declared) in case sibling tests read it directly.
    String name;

    /**
     * Creates a CertificateOutput with the given certificate name.
     *
     * @param certName the certificate name to hold
     */
    public CertificateOutput(String certName) {
        this.name = certName;
    }

    /**
     * Gets the certificate name.
     *
     * @return the name supplied at construction
     */
    public String getName() {
        return this.name;
    }
}
} |
It isn't making a service call but it's a scenario where we want time to actually block. This is testing a case where we set the waitUntil timeout to a few 100ms and never reach the state we wanted. | public void waitUntilOperationWithTimeout() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<Response>, Mono<Response>> activationOperation
= ignored -> Mono.just(activationResponse);
int[] invocationCount = new int[1];
invocationCount[0] = -1;
PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10));
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
return Mono.just(expected);
} else {
return Mono.delay(Duration.ofSeconds(5))
.map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
}
};
SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
SUCCESSFULLY_COMPLETED));
assertEquals("0", pollResponse.getValue().getResponse());
} | return Mono.delay(Duration.ofSeconds(5)) | public void waitUntilOperationWithTimeout() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<Response>, Mono<Response>> activationOperation
= ignored -> Mono.just(activationResponse);
int[] invocationCount = new int[1];
invocationCount[0] = -1;
PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10));
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
invocationCount[0]++;
if (invocationCount[0] == 0) {
return Mono.just(expected);
} else {
return Mono.delay(Duration.ofSeconds(5))
.map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
}
};
SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
pollOperation, (ignored1, ignored2) -> null, ignored -> null);
PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
SUCCESSFULLY_COMPLETED));
assertEquals("0", pollResponse.getValue().getResponse());
} | class PollerTests {
// Upper bound handed to StepVerifier.verify(...) so a misbehaving flux fails the test instead of hanging the build.
private static final Duration STEPVERIFIER_TIMEOUT = Duration.ofSeconds(30);
/** Verifies the {@link PollerFlux} constructor rejects a zero poll interval. */
@Test
public void asyncPollerConstructorPollIntervalZero() {
assertThrows(IllegalArgumentException.class, () -> new PollerFlux<>(Duration.ZERO, ignored -> null,
    ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
}
/** Verifies the {@link PollerFlux} constructor rejects a negative poll interval. */
@Test
public void asyncPollerConstructorPollIntervalNegative() {
assertThrows(IllegalArgumentException.class, () -> new PollerFlux<>(Duration.ofSeconds(-1), ignored -> null,
    ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
}
/** Verifies the {@link PollerFlux} constructor rejects a null poll interval. */
@Test
public void asyncPollerConstructorPollIntervalNull() {
assertThrows(NullPointerException.class, () -> new PollerFlux<>(null, ignored -> null, ignored -> null,
    (ignored1, ignored2) -> null, ignored -> null));
}
/** Verifies the {@link PollerFlux} constructor rejects a null activation operation. */
@Test
public void asyncPollerConstructorActivationOperationNull() {
assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), null, ignored -> null,
    (ignored1, ignored2) -> null, ignored -> null));
}
/** Verifies the {@link PollerFlux} constructor rejects a null poll operation. */
@Test
public void asyncPollerConstructorPollOperationNull() {
assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null, null,
    (ignored1, ignored2) -> null, ignored -> null));
}
/** Verifies the {@link PollerFlux} constructor rejects a null cancel operation. */
@Test
public void asyncPollerConstructorCancelOperationNull() {
assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null,
    ignored -> null, null, ignored -> null));
}
/** Verifies the {@link PollerFlux} constructor rejects a null fetch-result operation. */
@Test
public void asyncPollerConstructorFetchResultOperationNull() {
assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null,
    ignored -> null, (ignored1, ignored2) -> null,
    null));
}
/**
 * Verifies subscribers receive every poll response in order — including non-standard {@code OTHER_*} statuses —
 * and that the flux completes after the terminal (SUCCESSFULLY_COMPLETED) response.
 */
@Test
public void subscribeToSpecificOtherOperationStatusTest() {
final Duration retryAfter = Duration.ofMillis(10);
PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false),
    new Response("2"), retryAfter);
PollResponse<Response> response3 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_2", false),
    new Response("3"), retryAfter);
PollResponse<Response> response4 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("4"), retryAfter);
Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.empty();
int[] callCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
switch (callCount[0]++) {
case 0: return Mono.just(response0);
case 1: return Mono.just(response1);
case 2: return Mono.just(response2);
case 3: return Mono.just(response3);
case 4: return Mono.just(response4);
default: return Mono.error(new IllegalStateException("Too many requests"));
}
};
PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
    activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
StepVerifier.create(pollerFlux)
    .expectSubscription()
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response3.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response4.getStatus())
    .expectComplete()
    .verify(STEPVERIFIER_TIMEOUT);
}
/**
 * Verifies that no polling occurs when the activation operation itself returns a terminal
 * (SUCCESSFULLY_COMPLETED) response — the poll operation errors if it is ever invoked — and that
 * the activation operation runs exactly once.
 */
@Test
public void noPollingForSynchronouslyCompletedActivationTest() {
int[] activationCallCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
    = ignored -> Mono.fromCallable(() -> {
    activationCallCount[0]++;
    return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("ActivationDone"));
    });
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
    Mono.error(new RuntimeException("Polling shouldn't happen for synchronously completed activation."));
PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10),
    activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
StepVerifier.create(pollerFlux)
    .expectSubscription()
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus()
        == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
    .expectComplete()
    .verify(STEPVERIFIER_TIMEOUT);
assertEquals(1, activationCallCount[0]);
}
/**
 * Same scenario as {@code noPollingForSynchronouslyCompletedActivationTest} but exercised through the
 * {@code SyncPoller} adapter: a terminal activation response completes the poller without any polling.
 */
@Test
public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
int[] activationCallCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
    = ignored -> Mono.fromCallable(() -> {
    activationCallCount[0]++;
    return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("ActivationDone"));
    });
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
    Mono.error(new RuntimeException("Polling shouldn't happen for synchronously completed activation."));
SyncPoller<Response, CertificateOutput> syncPoller = create(Duration.ofMillis(10),
    activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null,
    ignored -> (Mono<CertificateOutput>) null)
    .getSyncPoller();
try {
PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
assertEquals(1, activationCallCount[0]);
} catch (Exception e) {
fail("SyncPoller did not complete on activation", e);
}
}
/**
 * Verifies that polling does proceed when the activation response is IN_PROGRESS: all four poll responses are
 * emitted in order (ending with the terminal one) and the activation operation still runs exactly once.
 */
@Test
public void ensurePollingForInProgressActivationResponseTest() {
final Duration retryAfter = Duration.ofMillis(10);
int[] activationCallCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
    = ignored -> Mono.fromCallable(() -> {
    activationCallCount[0]++;
    return new PollResponse<>(IN_PROGRESS, new Response("ActivationDone"));
    });
PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false),
    new Response("2"), retryAfter);
PollResponse<Response> response3 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("3"), retryAfter);
int[] callCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
switch (callCount[0]++) {
case 0: return Mono.just(response0);
case 1: return Mono.just(response1);
case 2: return Mono.just(response2);
case 3: return Mono.just(response3);
default: return Mono.error(new IllegalStateException("Too many requests"));
}
};
PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10),
    activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
StepVerifier.create(pollerFlux)
    .expectSubscription()
    .assertNext(asyncPollResponse -> assertEquals(response0.getStatus(), asyncPollResponse.getStatus()))
    .assertNext(asyncPollResponse -> assertEquals(response1.getStatus(), asyncPollResponse.getStatus()))
    .assertNext(asyncPollResponse -> assertEquals(response2.getStatus(), asyncPollResponse.getStatus()))
    .assertNext(asyncPollResponse -> assertEquals(response3.getStatus(), asyncPollResponse.getStatus()))
    .expectComplete()
    .verify(STEPVERIFIER_TIMEOUT);
assertEquals(1, activationCallCount[0]);
}
/**
 * Verifies that the activation operation is invoked only once even when the {@link PollerFlux} is subscribed to
 * multiple times; each subscription re-runs the polling sequence (the poll counter is reset between runs).
 */
@Test
public void subscribeToActivationOnlyOnceTest() {
final Duration retryAfter = Duration.ofMillis(10);
PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("2"), retryAfter);
int[] activationCallCount = new int[1];
Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.fromCallable(() -> {
activationCallCount[0]++;
return new Response("ActivationDone");
});
int[] pollCallCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
switch (pollCallCount[0]++) {
case 0: return Mono.just(response0);
case 1: return Mono.just(response1);
case 2: return Mono.just(response2);
default: return Mono.error(new IllegalStateException("Too many requests"));
}
};
PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
    activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
StepVerifier.create(pollerFlux)
    .expectSubscription()
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus())
    .expectComplete()
    .verify(STEPVERIFIER_TIMEOUT);
// Reset the poll counter so the second subscription sees the same response sequence.
pollCallCount[0] = 0;
StepVerifier.create(pollerFlux)
    .expectSubscription()
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus())
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus())
    .expectComplete()
    .verify(STEPVERIFIER_TIMEOUT);
// Despite two subscriptions, activation ran exactly once.
assertEquals(1, activationCallCount[0]);
}
/**
 * Verifies that {@code cancelOperation} can be invoked from within an operator chain and that it receives the
 * polling context (carrying the activation response) plus the activation poll response as its two arguments.
 */
@Test
public void cancellationCanBeCalledFromOperatorChainTest() {
final Duration retryAfter = Duration.ofMillis(10);
PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("2"), retryAfter);
final Response activationResponse = new Response("Foo");
Function<PollingContext<Response>, Mono<Response>> activationOperation
    = ignored -> Mono.just(activationResponse);
int[] callCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
switch (callCount[0]++) {
case 0: return Mono.just(response0);
case 1: return Mono.just(response1);
case 2: return Mono.just(response2);
default: return Mono.error(new IllegalStateException("Too many requests"));
}
};
// Capture the arguments handed to the cancel operation so they can be asserted on afterwards.
final List<Object> cancelParameters = new ArrayList<>();
BiFunction<PollingContext<Response>, PollResponse<Response>, Mono<Response>> cancelOperation
    = (pollingContext, pollResponse) -> {
    Collections.addAll(cancelParameters, pollingContext, pollResponse);
    return Mono.just(new Response("OperationCancelled"));
    };
PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
    activationOperation, pollOperation, cancelOperation, ignored -> null);
AtomicReference<AsyncPollResponse<Response, CertificateOutput>> secondAsyncResponse = new AtomicReference<>();
// Take only the first two emissions, then cancel from within the chain.
Response cancelResponse = pollerFlux
    .take(2)
    .last()
    .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<Response>>) asyncPollResponse -> {
    secondAsyncResponse.set(asyncPollResponse);
    return asyncPollResponse.cancelOperation();
    }).block()
Assertions.assertNotNull(cancelResponse);
Assertions.assertTrue(cancelResponse.getResponse().equalsIgnoreCase("OperationCancelled"));
Assertions.assertNotNull(secondAsyncResponse.get());
Assertions.assertEquals("1", secondAsyncResponse.get().getValue().getResponse());
assertEquals(2, cancelParameters.size());
assertEquals(activationResponse, ((PollingContext<?>) cancelParameters.get(0)).getActivationResponse()
    .getValue());
assertEquals(activationResponse, ((PollResponse<?>) cancelParameters.get(1)).getValue());
}
/**
 * Verifies that {@code getFinalResult} can be invoked from within an operator chain once the terminal response is
 * observed, and that the fetch-result operation receives a polling context containing both the activation response
 * and the latest (terminal) response.
 */
@Test
public void getResultCanBeCalledFromOperatorChainTest() {
final Duration retryAfter = Duration.ofMillis(10);
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
    new Response("2"), retryAfter);
final Response activationResponse = new Response("Foo");
Function<PollingContext<Response>, Mono<Response>> activationOperation
    = ignored -> Mono.just(activationResponse);
int[] callCount = new int[1];
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
switch (callCount[0]++) {
case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter));
case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter));
case 2: return Mono.just(response2);
default: return Mono.error(new IllegalStateException("Too many requests"));
}
};
// Capture the polling context handed to the fetch-result operation for later assertions.
final List<PollingContext<Response>> fetchResultParameters = new ArrayList<>();
Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation = pollingContext -> {
fetchResultParameters.add(pollingContext);
return Mono.just(new CertificateOutput("LROFinalResult"));
};
PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
    activationOperation, pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
AtomicReference<AsyncPollResponse<Response, CertificateOutput>> terminalAsyncResponse = new AtomicReference<>();
// Poll until the first complete status, then fetch the final result from within the chain.
CertificateOutput lroResult = pollerFlux
    .takeUntil(apr -> apr.getStatus().isComplete())
    .last()
    .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<CertificateOutput>>)
    asyncPollResponse -> {
    terminalAsyncResponse.set(asyncPollResponse);
    return asyncPollResponse.getFinalResult();
    }).block();
Assertions.assertNotNull(lroResult);
Assertions.assertTrue(lroResult.getName().equalsIgnoreCase("LROFinalResult"));
Assertions.assertNotNull(terminalAsyncResponse.get());
Assertions.assertTrue(terminalAsyncResponse.get().getValue().getResponse().equalsIgnoreCase("2"));
assertEquals(1, fetchResultParameters.size());
PollingContext<Response> pollingContext = fetchResultParameters.get(0);
assertEquals(activationResponse, pollingContext.getActivationResponse().getValue());
assertEquals(response2, pollingContext.getLatestResponse());
}
/**
 * Verifies that an error emitted by the poll operation (on the third poll here) is propagated to subscribers of the
 * {@link PollerFlux} after the earlier in-progress responses have been delivered.
 */
@Test
public void verifyExceptionPropagationFromPollingOperation() {
final Response activationResponse = new Response("Foo");
Function<PollingContext<Response>, Mono<Response>> activationOperation
    = ignored -> Mono.just(activationResponse);
final AtomicInteger cnt = new AtomicInteger();
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
int count = cnt.incrementAndGet();
if (count <= 2) {
return new PollResponse<>(IN_PROGRESS, new Response("1"));
} else if (count == 3) {
// The error below must terminate the flux; the later branches should never be reached.
return Mono.error(new RuntimeException("Polling operation failed!"));
} else if (count == 4) {
return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2")));
} else {
return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3")));
}
};
PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
    activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
StepVerifier.create(pollerFlux)
    .expectSubscription()
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
    .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
    .expectErrorMessage("Polling operation failed!")
    .verify(STEPVERIFIER_TIMEOUT);
}
@Test
public void verifyErrorFromPollingOperation() {
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    // The third poll reports FAILED, a terminal status, so emission stops there.
    final AtomicInteger pollAttempt = new AtomicInteger();
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = context -> {
        int attempt = pollAttempt.incrementAndGet();
        if (attempt <= 2) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
        }
        if (attempt == 3) {
            return Mono.just(new PollResponse<>(FAILED, new Response("2")));
        }
        if (attempt == 4) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("3")));
        }
        return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("4")));
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activation, pollOperation, (context, response) -> null, context -> null);
    // Unlike an onError, a FAILED status completes the flux normally after being emitted.
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(emitted -> emitted.getStatus() == IN_PROGRESS)
        .expectNextMatches(emitted -> emitted.getStatus() == IN_PROGRESS)
        .expectNextMatches(emitted -> emitted.getStatus() == FAILED)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
@Test
public void syncPollerConstructorPollIntervalZero() {
    // A zero poll interval is rejected: polling requires a strictly positive duration.
    Assertions.assertThrows(IllegalArgumentException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ZERO,
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollIntervalNegative() {
    // A negative poll interval is just as invalid as zero.
    Assertions.assertThrows(IllegalArgumentException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(-1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollIntervalNull() {
    // A null poll interval must fail fast with an NPE rather than later during polling.
    Assertions.assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(null,
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncConstructorActivationOperationNull() {
    // The activation operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), null,
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollOperationNull() {
    // The poll operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null,
            (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorCancelOperationNull() {
    // The cancel operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, null, context -> null));
}
@Test
public void syncPollerConstructorFetchResultOperationNull() {
    // The fetch-result operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, null));
}
@Test
public void syncPollerShouldCallActivationFromConstructor() {
    // Tracks whether the activation operation ran; the constructor must invoke it eagerly.
    boolean[] activated = new boolean[1];
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.fromCallable(() -> {
        activated[0] = true;
        return new Response("ActivationDone");
    });
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        context -> null, (context, response) -> null, context -> null);
    // No poll has happened yet, so only the constructor could have flipped this flag.
    Assertions.assertTrue(activated[0]);
}
@Test
public void eachPollShouldReceiveLastPollResponse() {
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(new Response("A"));
    // Each poll must see the previous PollResponse through the PollingContext. The new value is built by
    // appending "A" to the previous value's toString(), so every poll adds one "Response: " prefix plus an "A".
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = pollingContext -> {
        Assertions.assertNotNull(pollingContext.getActivationResponse());
        Assertions.assertNotNull(pollingContext.getLatestResponse());
        PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
        Assertions.assertNotNull(latestResponse);
        return Mono.just(new PollResponse<>(IN_PROGRESS,
            new Response(latestResponse.getValue().toString() + "A"), Duration.ofMillis(10)));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    // Poll 1: previous value is the activation's "A", toString() "Response: A" -> payload "Response: AA".
    PollResponse<Response> pollResponse = poller.poll();
    Assertions.assertNotNull(pollResponse);
    Assertions.assertNotNull(pollResponse.getValue().getResponse());
    Assertions.assertTrue(pollResponse.getValue()
        .getResponse()
        .equalsIgnoreCase("Response: AA"));
    // Poll 2: previous toString() is "Response: Response: AA" -> payload gains another prefix and "A".
    pollResponse = poller.poll();
    Assertions.assertNotNull(pollResponse);
    Assertions.assertNotNull(pollResponse.getValue().getResponse());
    Assertions.assertTrue(pollResponse.getValue()
        .getResponse()
        .equalsIgnoreCase("Response: Response: AAA"));
    // Poll 3: same accumulation pattern continues, proving the context always carries the latest response.
    pollResponse = poller.poll();
    Assertions.assertNotNull(pollResponse);
    Assertions.assertNotNull(pollResponse.getValue().getResponse());
    Assertions.assertTrue(pollResponse.getValue()
        .getResponse()
        .equalsIgnoreCase("Response: Response: Response: AAAA"));
}
@Test
public void waitForCompletionShouldReturnTerminalPollResponse() {
    PollResponse<Response> terminalResponse = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), Duration.ofMillis(10));
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    int[] attempts = new int[1];
    // Two intermediate responses followed by a terminal one; any further poll is a test failure.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        int attempt = attempts[0]++;
        if (attempt == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        } else if (attempt == 1) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
        } else if (attempt == 2) {
            return Mono.just(terminalResponse);
        }
        return Mono.error(new IllegalStateException("Too many requests"));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, context -> null);
    // waitForCompletion blocks until the terminal status and hands back that very response.
    PollResponse<Response> pollResponse = poller.waitForCompletion();
    Assertions.assertNotNull(pollResponse.getValue());
    assertEquals(terminalResponse.getValue().getResponse(), pollResponse.getValue().getResponse());
    assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}
@Test
public void getResultShouldPollUntilCompletionAndFetchResult() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    int[] invocationCount = new int[]{-1};
    // Completes on the third poll; polling after the terminal response is an error.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
        if (invocationCount[0] == 1) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
        }
        if (invocationCount[0] == 2) {
            return Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                new Response("2"), Duration.ofMillis(10)));
        }
        return Mono.error(new RuntimeException("Poll should not be called after terminal response"));
    };
    Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResult
        = ignored -> Mono.just(new CertificateOutput("cert1"));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, fetchResult);
    // getFinalResult drives polling to completion, then invokes the fetch-result operation once.
    CertificateOutput certificateOutput = poller.getFinalResult();
    Assertions.assertNotNull(certificateOutput);
    assertEquals("cert1", certificateOutput.getName());
    assertEquals(2, invocationCount[0]);
}
@Test
public void getResultShouldNotPollOnCompletedPoller() {
    PollResponse<Response> terminalResponse = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), Duration.ofMillis(10));
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResult
        = ignored -> Mono.just(new CertificateOutput("cert1"));
    int[] attempts = new int[1];
    // Exactly three polls reach the terminal state; getFinalResult afterwards must not poll again.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        int attempt = attempts[0]++;
        if (attempt == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        } else if (attempt == 1) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
        } else if (attempt == 2) {
            return Mono.just(terminalResponse);
        }
        return Mono.error(new IllegalStateException("Too many requests"));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, fetchResult);
    PollResponse<Response> pollResponse = poller.waitForCompletion();
    Assertions.assertNotNull(pollResponse.getValue());
    assertEquals(terminalResponse.getValue().getResponse(), pollResponse.getValue().getResponse());
    assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
    // The poller is already terminal; fetching the result must use the cached response.
    CertificateOutput certificateOutput = poller.getFinalResult();
    Assertions.assertNotNull(certificateOutput);
    assertEquals("cert1", certificateOutput.getName());
}
@Test
public void waitUntilShouldPollAfterMatchingStatus() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    // waitUntil targets a custom, non-terminal status rather than a completion status.
    LongRunningOperationStatus matchStatus = LongRunningOperationStatus.fromString("OTHER_1", false);
    int[] invocationCount = new int[]{-1};
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
        if (invocationCount[0] == 1) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
        }
        if (invocationCount[0] == 2) {
            return Mono.just(new PollResponse<>(matchStatus, new Response("1"), Duration.ofMillis(10)));
        }
        return Mono.error(new RuntimeException("Poll should not be called after matching response"));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, context -> null);
    // Polling stops as soon as the matching status is observed (third poll, index 2).
    PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
    assertEquals(matchStatus, pollResponse.getStatus());
    assertEquals(2, invocationCount[0]);
}
@Test
public void verifyExceptionPropagationFromPollingOperationSyncPoller() {
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    final AtomicInteger cnt = new AtomicInteger();
    // The third poll errors; getFinalResult must propagate that exception to the caller.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
        int count = cnt.incrementAndGet();
        if (count <= 2) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
        } else if (count == 3) {
            return Mono.error(new RuntimeException("Polling operation failed!"));
        } else if (count == 4) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2")));
        } else {
            return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3")));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult);
    // JUnit's assertEquals takes (expected, actual); the arguments were previously reversed.
    assertEquals("Polling operation failed!", exception.getMessage());
}
@Test
public void testPollerFluxError() throws InterruptedException {
    IllegalArgumentException expectedException = new IllegalArgumentException();
    PollerFlux<String, String> pollerFlux = error(expectedException);
    CountDownLatch latch = new CountDownLatch(1);
    // An error()-built flux must emit only the supplied exception: no values, no completion signal.
    pollerFlux.subscribe(
        ignored -> Assertions.fail("Did not expect a response"),
        throwable -> {
            latch.countDown();
            Assertions.assertSame(expectedException, throwable);
        },
        () -> Assertions.fail("Did not expect the flux to complete"));
    // The error callback must have fired within a second.
    Assertions.assertTrue(latch.await(1, TimeUnit.SECONDS));
}
@Test
public void testSyncPollerError() {
    // Converting an error PollerFlux to a SyncPoller surfaces the error immediately.
    Assertions.assertThrows(IllegalArgumentException.class,
        () -> error(new IllegalArgumentException()).getSyncPoller());
}
@Test
public void testUpdatePollingIntervalWithoutVirtualTimer() {
    // Poller that never terminates; every poll simply reports IN_PROGRESS again.
    PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "PollOperation")),
        (context, response) -> Mono.just("Cancel"),
        context -> Mono.just("FinalResult"));
    // Raise the interval to 200ms before subscribing; 5 emissions should still fit in ~1s of real time.
    pollerFlux.setPollInterval(Duration.ofMillis(200));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofSeconds(1))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
@Test
public void testUpdatePollingInterval() {
    // Never-terminating poller used to observe how setPollInterval changes emission cadence.
    PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "PollOperation")),
        (context, response) -> Mono.just("Cancel"),
        context -> Mono.just("FinalResult"));
    // Default 10ms interval: 5 responses fit comfortably inside a 55ms wait.
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofMillis(55))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    // Widen to 50ms: the same 5 responses now need roughly 250ms.
    pollerFlux.setPollInterval(Duration.ofMillis(50));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofMillis(255))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    // Widen again to 195ms: 5 responses need close to a full second.
    pollerFlux.setPollInterval(Duration.ofMillis(195));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofSeconds(1))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
/**
 * Verifies that {@code waitForCompletion} with a timeout surfaces a {@link RuntimeException} whose cause is a
 * {@link TimeoutException} when a single poll outlives the timeout period.
 */
@Test
public void waitForCompletionSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    // Every poll takes ~2 seconds, far longer than the 100ms timeout used below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.delay(Duration.ofSeconds(2))
            .map(delay -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, context -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.waitForCompletion(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Verifies that {@code waitForCompletion} with a timeout throws a {@link RuntimeException} caused by a
 * {@link TimeoutException} when the overall operation never completes in time.
 */
@Test
public void waitForCompletionOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    int[] pollCount = new int[]{-1};
    // The first poll answers quickly; each later poll stalls for 2 seconds, exceeding the 100ms budget.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        pollCount[0]++;
        if (pollCount[0] == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
        return Mono.delay(Duration.ofSeconds(2))
            .map(delay -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, context -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.waitForCompletion(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Tests that {@code waitUntil} does NOT throw when a single poll takes longer than the timeout period; instead it
 * returns the last response received so far (here, the activation response). The previous javadoc incorrectly
 * claimed an exception was thrown.
 */
@Test
public void waitUntilSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    // Every poll takes ~2 seconds, far longer than the 100ms waitUntil timeout below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.delay(Duration.ofSeconds(2))
            .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    // No poll completed within the timeout, so the activation response is still the latest known response.
    PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
    assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse());
}
/**
 * Tests that the last received {@link PollResponse} is returned when {@code waitUntil} times out; {@code waitUntil}
 * does not throw on timeout.
 */
@Test
public void waitUntilOperationTimesOut() {
    // NOTE(review): the previous revision had this javadoc and @Test annotation with no method body, which
    // stacked two @Test annotations on the following test — a compile error. The missing test is restored here.
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] invocationCount = new int[]{-1};
    // The first poll is quick; every later poll stalls well past the waitUntil timeout below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
        return Mono.delay(Duration.ofSeconds(2))
            .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    // On timeout the poller hands back the last response it received — the first poll's "0".
    PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
    assertEquals("0", pollResponse.getValue().getResponse());
}

/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void getFinalResultSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    // Every poll takes ~2 seconds, far longer than the 100ms timeout used below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.delay(Duration.ofSeconds(2))
            .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Verifies that {@code getFinalResult} with a timeout throws a {@link RuntimeException} caused by a
 * {@link TimeoutException} when the operation never finishes within the allotted time.
 */
@Test
public void getFinalResultOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activationResponse);
    int[] pollCount = new int[]{-1};
    // The first poll is fast; each subsequent poll stalls for 2 seconds, well past the 100ms timeout.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        pollCount[0]++;
        if (pollCount[0] == 0) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
        return Mono.delay(Duration.ofSeconds(2))
            .map(delay -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(context).block()),
        pollOperation, (context, response) -> null, context -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Renders a throwable's full stack trace into a {@link String} for use in assertion failure messages.
 */
private static String printException(Throwable throwable) {
    StringWriter stackTrace = new StringWriter();
    throwable.printStackTrace(new PrintWriter(stackTrace, true));
    return stackTrace.toString();
}
/**
 * Simple value holder used as the poll-response payload throughout these tests.
 */
public static class Response {
    // Immutable payload string; equality is intentionally left as reference identity.
    private final String response;

    public Response(String response) {
        this.response = response;
    }

    /** Returns the raw payload string. */
    public String getResponse() {
        return response;
    }

    @Override
    public String toString() {
        // The "Response: " prefix is relied upon by eachPollShouldReceiveLastPollResponse.
        return "Response: " + response;
    }
}
/**
 * Final-result type produced by the fetch-result operations in these tests.
 */
public static class CertificateOutput {
    // Made private final: the field was previously package-visible and mutable for no reason;
    // all access in this file goes through getName().
    private final String name;

    public CertificateOutput(String certName) {
        this.name = certName;
    }

    /** Returns the certificate name supplied at construction. */
    public String getName() {
        return name;
    }
}
} | class PollerTests {
// Upper bound for every StepVerifier.verify call, so a hung flux fails the test instead of blocking forever.
private static final Duration STEPVERIFIER_TIMEOUT = Duration.ofSeconds(30);
@Test
public void asyncPollerConstructorPollIntervalZero() {
    // A zero poll interval is rejected by PollerFlux as well: polling needs a positive duration.
    Assertions.assertThrows(IllegalArgumentException.class,
        () -> new PollerFlux<>(Duration.ZERO, context -> null, context -> null,
            (context, response) -> null, context -> null));
}
@Test
public void asyncPollerConstructorPollIntervalNegative() {
    // A negative poll interval is just as invalid as zero.
    Assertions.assertThrows(IllegalArgumentException.class,
        () -> new PollerFlux<>(Duration.ofSeconds(-1), context -> null, context -> null,
            (context, response) -> null, context -> null));
}
@Test
public void asyncPollerConstructorPollIntervalNull() {
    // A null poll interval must fail fast with an NPE.
    Assertions.assertThrows(NullPointerException.class,
        () -> new PollerFlux<>(null, context -> null, context -> null,
            (context, response) -> null, context -> null));
}
@Test
public void asyncPollerConstructorActivationOperationNull() {
    // The activation operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new PollerFlux<>(Duration.ofSeconds(1), null, context -> null,
            (context, response) -> null, context -> null));
}
@Test
public void asyncPollerConstructorPollOperationNull() {
    // The poll operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new PollerFlux<>(Duration.ofSeconds(1), context -> null, null,
            (context, response) -> null, context -> null));
}
@Test
public void asyncPollerConstructorCancelOperationNull() {
    // The cancel operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new PollerFlux<>(Duration.ofSeconds(1), context -> null, context -> null,
            null, context -> null));
}
@Test
public void asyncPollerConstructorFetchResultOperationNull() {
    // The fetch-result operation is mandatory; null is rejected at construction time.
    Assertions.assertThrows(NullPointerException.class,
        () -> new PollerFlux<>(Duration.ofSeconds(1), context -> null, context -> null,
            (context, response) -> null, null));
}
@Test
public void subscribeToSpecificOtherOperationStatusTest() {
    final Duration retryAfter = Duration.ofMillis(10);
    // A progression through two IN_PROGRESS states, two custom non-terminal states, then success.
    PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
    PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false),
        new Response("2"), retryAfter);
    PollResponse<Response> response3 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_2", false),
        new Response("3"), retryAfter);
    PollResponse<Response> response4 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("4"), retryAfter);
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.empty();
    int[] pollAttempt = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        int attempt = pollAttempt[0]++;
        if (attempt == 0) {
            return Mono.just(response0);
        } else if (attempt == 1) {
            return Mono.just(response1);
        } else if (attempt == 2) {
            return Mono.just(response2);
        } else if (attempt == 3) {
            return Mono.just(response3);
        } else if (attempt == 4) {
            return Mono.just(response4);
        }
        return Mono.error(new IllegalStateException("Too many requests"));
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activation, pollOperation, (context, response) -> null, context -> null);
    // Custom ("OTHER_*") statuses are emitted like any other non-terminal status.
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(emitted -> emitted.getStatus() == response0.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response1.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response2.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response3.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response4.getStatus())
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
@Test
public void noPollingForSynchronouslyCompletedActivationTest() {
    int[] activationInvocations = new int[1];
    // The activation itself reports SUCCESSFULLY_COMPLETED, so the flux must never poll.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationWithResponse
        = ignored -> Mono.fromCallable(() -> {
            activationInvocations[0]++;
            return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                new Response("ActivationDone"));
        });
    // Any poll is a bug: the operation already finished during activation.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.error(new RuntimeException("Polling shouldn't happen for synchronously completed activation."));
    PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10),
        activationWithResponse, pollOperation, (context, response) -> null, context -> null);
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(emitted -> emitted.getStatus()
            == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    assertEquals(1, activationInvocations[0]);
}
@Test
public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
    // Counts activation invocations; a terminally-completed activation must not trigger any polling.
    int[] activationCallCount = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
        = ignored -> Mono.fromCallable(() -> {
            activationCallCount[0]++;
            return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                new Response("ActivationDone"));
        });
    // Any poll is a bug: the activation already reported a terminal status.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.error(new RuntimeException("Polling shouldn't happen for synchronously completed activation."));
    // The cast pins the generic type of the null fetch-result Mono so overload/type inference resolves.
    SyncPoller<Response, CertificateOutput> syncPoller = create(Duration.ofMillis(10),
        activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null,
        ignored -> (Mono<CertificateOutput>) null)
        .getSyncPoller();
    try {
        // waitForCompletion should return promptly with the activation's terminal response; the 1s timeout
        // guards against a regression where the sync poller starts polling (and would then error).
        PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
        assertEquals(1, activationCallCount[0]);
    } catch (Exception e) {
        fail("SyncPoller did not complete on activation", e);
    }
}
@Test
public void ensurePollingForInProgressActivationResponseTest() {
    final Duration retryAfter = Duration.ofMillis(10);
    int[] activationInvocations = new int[1];
    // Activation finishes in IN_PROGRESS, so the flux must keep polling until a terminal status arrives.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationWithResponse
        = ignored -> Mono.fromCallable(() -> {
            activationInvocations[0]++;
            return new PollResponse<>(IN_PROGRESS, new Response("ActivationDone"));
        });
    PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
    PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false),
        new Response("2"), retryAfter);
    PollResponse<Response> response3 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("3"), retryAfter);
    int[] pollAttempt = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        int attempt = pollAttempt[0]++;
        if (attempt == 0) {
            return Mono.just(response0);
        } else if (attempt == 1) {
            return Mono.just(response1);
        } else if (attempt == 2) {
            return Mono.just(response2);
        } else if (attempt == 3) {
            return Mono.just(response3);
        }
        return Mono.error(new IllegalStateException("Too many requests"));
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10),
        activationWithResponse, pollOperation, (context, response) -> null, context -> null);
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .assertNext(emitted -> assertEquals(response0.getStatus(), emitted.getStatus()))
        .assertNext(emitted -> assertEquals(response1.getStatus(), emitted.getStatus()))
        .assertNext(emitted -> assertEquals(response2.getStatus(), emitted.getStatus()))
        .assertNext(emitted -> assertEquals(response3.getStatus(), emitted.getStatus()))
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    assertEquals(1, activationInvocations[0]);
}
@Test
public void subscribeToActivationOnlyOnceTest() {
    final Duration retryAfter = Duration.ofMillis(10);
    PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
    PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), retryAfter);
    int[] activationInvocations = new int[1];
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.fromCallable(() -> {
        activationInvocations[0]++;
        return new Response("ActivationDone");
    });
    int[] pollAttempt = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        int attempt = pollAttempt[0]++;
        if (attempt == 0) {
            return Mono.just(response0);
        } else if (attempt == 1) {
            return Mono.just(response1);
        } else if (attempt == 2) {
            return Mono.just(response2);
        }
        return Mono.error(new IllegalStateException("Too many requests"));
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activation, pollOperation, (context, response) -> null, context -> null);
    // First subscription drives the LRO to completion.
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(emitted -> emitted.getStatus() == response0.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response1.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response2.getStatus())
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    // Re-subscribing polls again, but must reuse the cached activation result.
    pollAttempt[0] = 0;
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(emitted -> emitted.getStatus() == response0.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response1.getStatus())
        .expectNextMatches(emitted -> emitted.getStatus() == response2.getStatus())
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    assertEquals(1, activationInvocations[0]);
}
// Verifies that AsyncPollResponse.cancelOperation() can be invoked from within an
// operator chain, and that the cancel operation receives the activation response
// (via the PollingContext) and the latest PollResponse as its parameters.
@Test
public void cancellationCanBeCalledFromOperatorChainTest() {
    final Duration retryAfter = Duration.ofMillis(10);
    PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
    PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), retryAfter);
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] callCount = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        switch (callCount[0]++) {
            case 0: return Mono.just(response0);
            case 1: return Mono.just(response1);
            case 2: return Mono.just(response2);
            default: return Mono.error(new IllegalStateException("Too many requests"));
        }
    };
    // Records the parameters the cancel operation was invoked with, for later assertions.
    final List<Object> cancelParameters = new ArrayList<>();
    BiFunction<PollingContext<Response>, PollResponse<Response>, Mono<Response>> cancelOperation
        = (pollingContext, pollResponse) -> {
            Collections.addAll(cancelParameters, pollingContext, pollResponse);
            return Mono.just(new Response("OperationCancelled"));
        };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activationOperation, pollOperation, cancelOperation, ignored -> null);
    AtomicReference<AsyncPollResponse<Response, CertificateOutput>> secondAsyncResponse = new AtomicReference<>();
    // Take only the first two emissions and cancel from the second one.
    Response cancelResponse = pollerFlux
        .take(2)
        .last()
        .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<Response>>) asyncPollResponse -> {
            secondAsyncResponse.set(asyncPollResponse);
            return asyncPollResponse.cancelOperation();
        }).block();
    Assertions.assertNotNull(cancelResponse);
    Assertions.assertTrue(cancelResponse.getResponse().equalsIgnoreCase("OperationCancelled"));
    Assertions.assertNotNull(secondAsyncResponse.get());
    Assertions.assertEquals("1", secondAsyncResponse.get().getValue().getResponse());
    assertEquals(2, cancelParameters.size());
    assertEquals(activationResponse, ((PollingContext<?>) cancelParameters.get(0)).getActivationResponse()
        .getValue());
    assertEquals(activationResponse, ((PollResponse<?>) cancelParameters.get(1)).getValue());
}
// Verifies that AsyncPollResponse.getFinalResult() can be invoked from within an
// operator chain once the LRO completes, and that the fetch-result operation receives
// a PollingContext carrying both the activation response and the terminal response.
@Test
public void getResultCanBeCalledFromOperatorChainTest() {
    final Duration retryAfter = Duration.ofMillis(10);
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), retryAfter);
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] callCount = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        switch (callCount[0]++) {
            case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter));
            case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter));
            case 2: return Mono.just(response2);
            default: return Mono.error(new IllegalStateException("Too many requests"));
        }
    };
    // Records the PollingContext passed to the fetch-result operation, for later assertions.
    final List<PollingContext<Response>> fetchResultParameters = new ArrayList<>();
    Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation = pollingContext -> {
        fetchResultParameters.add(pollingContext);
        return Mono.just(new CertificateOutput("LROFinalResult"));
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activationOperation, pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
    AtomicReference<AsyncPollResponse<Response, CertificateOutput>> terminalAsyncResponse = new AtomicReference<>();
    // Poll until a terminal status, then request the final result from the terminal emission.
    CertificateOutput lroResult = pollerFlux
        .takeUntil(apr -> apr.getStatus().isComplete())
        .last()
        .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<CertificateOutput>>)
            asyncPollResponse -> {
                terminalAsyncResponse.set(asyncPollResponse);
                return asyncPollResponse.getFinalResult();
            }).block();
    Assertions.assertNotNull(lroResult);
    Assertions.assertTrue(lroResult.getName().equalsIgnoreCase("LROFinalResult"));
    Assertions.assertNotNull(terminalAsyncResponse.get());
    Assertions.assertTrue(terminalAsyncResponse.get().getValue().getResponse().equalsIgnoreCase("2"));
    assertEquals(1, fetchResultParameters.size());
    PollingContext<Response> pollingContext = fetchResultParameters.get(0);
    assertEquals(activationResponse, pollingContext.getActivationResponse().getValue());
    assertEquals(response2, pollingContext.getLatestResponse());
}
// Verifies that an error signaled by the poll operation terminates the PollerFlux
// with that error (no further polls are attempted after the failure).
@Test
public void verifyExceptionPropagationFromPollingOperation() {
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    final AtomicInteger cnt = new AtomicInteger();
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
        int count = cnt.incrementAndGet();
        if (count <= 2) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
        } else if (count == 3) {
            // The third poll fails; the flux is expected to terminate with this error.
            return Mono.error(new RuntimeException("Polling operation failed!"));
        } else if (count == 4) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2")));
        } else {
            return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3")));
        }
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
        .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
        .expectErrorMessage("Polling operation failed!")
        .verify(STEPVERIFIER_TIMEOUT);
}
// Verifies that a poll response with FAILED status completes the PollerFlux normally
// (FAILED is a terminal status, not an error signal) and stops further polling.
@Test
public void verifyErrorFromPollingOperation() {
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    final AtomicInteger cnt = new AtomicInteger();
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
        int count = cnt.incrementAndGet();
        if (count <= 2) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
        } else if (count == 3) {
            // Terminal FAILED status — the flux should emit it and then complete.
            return Mono.just(new PollResponse<>(FAILED, new Response("2")));
        } else if (count == 4) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("3")));
        } else {
            return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("4")));
        }
    };
    PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
        activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    StepVerifier.create(pollerFlux)
        .expectSubscription()
        .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
        .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
        .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == FAILED)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
@Test
public void syncPollerConstructorPollIntervalZero() {
    // A zero poll interval is invalid; construction must be rejected immediately.
    assertThrows(IllegalArgumentException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ZERO,
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollIntervalNegative() {
    // A negative poll interval is invalid; construction must be rejected immediately.
    assertThrows(IllegalArgumentException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(-1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollIntervalNull() {
    // A null poll interval must trigger an eager null check.
    assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(null,
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncConstructorActivationOperationNull() {
    // A null activation operation must trigger an eager null check.
    assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), null,
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollOperationNull() {
    // A null poll operation must trigger an eager null check.
    assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorCancelOperationNull() {
    // A null cancel operation must trigger an eager null check.
    assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, null, context -> null));
}
@Test
public void syncPollerConstructorFetchResultOperationNull() {
    // A null fetch-result operation must trigger an eager null check.
    assertThrows(NullPointerException.class,
        () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, null));
}
// Verifies the SyncOverAsyncPoller eagerly runs the activation operation during
// construction, before any explicit poll is requested.
@Test
public void syncPollerShouldCallActivationFromConstructor() {
    Boolean[] activationCalled = new Boolean[1];
    activationCalled[0] = false;
    Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.fromCallable(() -> {
        activationCalled[0] = true;
        return new Response("ActivationDone");
    });
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        ignored -> null, (ignored1, ignored2) -> null, ignored -> null);
    // No poll() was issued, yet activation must have already happened.
    Assertions.assertTrue(activationCalled[0]);
}
// Verifies each poll invocation receives the previous PollResponse via the
// PollingContext, by accumulating the prior value's toString() into each new response.
@Test
public void eachPollShouldReceiveLastPollResponse() {
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(new Response("A"));
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = pollingContext -> {
        Assertions.assertNotNull(pollingContext.getActivationResponse());
        Assertions.assertNotNull(pollingContext.getLatestResponse());
        PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
        Assertions.assertNotNull(latestResponse);
        // Append "A" to the previous response's string form, so nesting proves propagation.
        return Mono.just(new PollResponse<>(IN_PROGRESS,
            new Response(latestResponse.getValue().toString() + "A"), Duration.ofMillis(10)));
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    PollResponse<Response> pollResponse = poller.poll();
    Assertions.assertNotNull(pollResponse);
    Assertions.assertNotNull(pollResponse.getValue().getResponse());
    Assertions.assertTrue(pollResponse.getValue()
        .getResponse()
        .equalsIgnoreCase("Response: AA"));
    pollResponse = poller.poll();
    Assertions.assertNotNull(pollResponse);
    Assertions.assertNotNull(pollResponse.getValue().getResponse());
    Assertions.assertTrue(pollResponse.getValue()
        .getResponse()
        .equalsIgnoreCase("Response: Response: AAA"));
    pollResponse = poller.poll();
    Assertions.assertNotNull(pollResponse);
    Assertions.assertNotNull(pollResponse.getValue().getResponse());
    Assertions.assertTrue(pollResponse.getValue()
        .getResponse()
        .equalsIgnoreCase("Response: Response: Response: AAAA"));
}
// Verifies waitForCompletion() blocks through intermediate IN_PROGRESS responses and
// returns the terminal (SUCCESSFULLY_COMPLETED) PollResponse.
@Test
public void waitForCompletionShouldReturnTerminalPollResponse() {
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), Duration.ofMillis(10));
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] pollCallCount = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        switch (pollCallCount[0]++) {
            case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
            case 2: return Mono.just(response2);
            default: return Mono.error(new IllegalStateException("Too many requests"));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    PollResponse<Response> pollResponse = poller.waitForCompletion();
    Assertions.assertNotNull(pollResponse.getValue());
    assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
    assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
}
// Verifies getFinalResult() polls until the terminal response, then invokes the
// fetch-result operation exactly once; the poll operation must not run again after that.
@Test
public void getResultShouldPollUntilCompletionAndFetchResult() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        switch (invocationCount[0]) {
            case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
            case 2: return Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                new Response("2"), Duration.ofMillis(10)));
            default: return Mono.error(new RuntimeException("Poll should not be called after terminal response"));
        }
    };
    Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation
        = ignored -> Mono.just(new CertificateOutput("cert1"));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
    CertificateOutput certificateOutput = poller.getFinalResult();
    Assertions.assertNotNull(certificateOutput);
    assertEquals("cert1", certificateOutput.getName());
    // Index 2 was the terminal poll; no further polls may have happened.
    assertEquals(2, invocationCount[0]);
}
// Verifies that once the poller has observed a terminal response (via waitForCompletion),
// getFinalResult() fetches the result without issuing any additional polls.
@Test
public void getResultShouldNotPollOnCompletedPoller() {
    PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
        new Response("2"), Duration.ofMillis(10));
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation
        = ignored -> Mono.just(new CertificateOutput("cert1"));
    int[] pollCallCount = new int[1];
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        switch (pollCallCount[0]++) {
            case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
            case 2: return Mono.just(response2);
            default: return Mono.error(new IllegalStateException("Too many requests"));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
    PollResponse<Response> pollResponse = poller.waitForCompletion();
    Assertions.assertNotNull(pollResponse.getValue());
    assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
    assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
    // A fourth poll would hit the IllegalStateException branch above, failing the test.
    CertificateOutput certificateOutput = poller.getFinalResult();
    Assertions.assertNotNull(certificateOutput);
    assertEquals("cert1", certificateOutput.getName());
}
// Verifies waitUntil(status) stops polling as soon as a response with the requested
// (possibly non-terminal, custom) status is observed.
@Test
public void waitUntilShouldPollAfterMatchingStatus() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    // A custom, non-complete status the poller should wait for.
    LongRunningOperationStatus matchStatus
        = LongRunningOperationStatus.fromString("OTHER_1", false);
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        switch (invocationCount[0]) {
            case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
            case 2: return Mono.just(new PollResponse<>(matchStatus, new Response("1"), Duration.ofMillis(10)));
            default: return Mono.error(new RuntimeException("Poll should not be called after matching response"));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
    assertEquals(matchStatus, pollResponse.getStatus());
    assertEquals(2, invocationCount[0]);
}
// Verifies that an error raised by the poll operation propagates out of the sync
// poller's getFinalResult() as the original RuntimeException.
@Test
public void verifyExceptionPropagationFromPollingOperationSyncPoller() {
    final Response activationResponse = new Response("Foo");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    final AtomicInteger cnt = new AtomicInteger();
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
        int count = cnt.incrementAndGet();
        if (count <= 2) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
        } else if (count == 3) {
            // The third poll fails; getFinalResult must surface this exception.
            return Mono.error(new RuntimeException("Polling operation failed!"));
        } else if (count == 4) {
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2")));
        } else {
            return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3")));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult);
    // Fixed: JUnit's assertEquals takes (expected, actual); the arguments were swapped,
    // which produces a misleading failure message when the assertion trips.
    assertEquals("Polling operation failed!", exception.getMessage());
}
// Verifies PollerFlux.error(...) produces a flux that emits no items and terminates
// with exactly the supplied exception instance.
@Test
public void testPollerFluxError() throws InterruptedException {
    IllegalArgumentException expectedException = new IllegalArgumentException();
    PollerFlux<String, String> pollerFlux = error(expectedException);
    CountDownLatch countDownLatch = new CountDownLatch(1);
    pollerFlux.subscribe(
        response -> Assertions.fail("Did not expect a response"),
        ex -> {
            countDownLatch.countDown();
            Assertions.assertSame(expectedException, ex);
        },
        () -> Assertions.fail("Did not expect the flux to complete")
    );
    // Bound the wait so a missing error signal fails the test instead of hanging it.
    boolean completed = countDownLatch.await(1, TimeUnit.SECONDS);
    Assertions.assertTrue(completed);
}
@Test
public void testSyncPollerError() {
    // An error PollerFlux must rethrow its exception when converted to a SyncPoller.
    PollerFlux<String, String> erroredFlux = error(new IllegalArgumentException());
    Assertions.assertThrows(IllegalArgumentException.class, erroredFlux::getSyncPoller);
}
// Verifies setPollInterval takes effect on a real (non-virtual-time) scheduler:
// five emissions at a 200ms interval should arrive within roughly one second.
@Test
public void testUpdatePollingIntervalWithoutVirtualTimer() {
    PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "PollOperation")),
        (context, response) -> Mono.just("Cancel"),
        context -> Mono.just("FinalResult"));
    pollerFlux.setPollInterval(Duration.ofMillis(200));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofSeconds(1))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
// Verifies setPollInterval can be changed repeatedly and each new interval governs
// subsequent subscriptions (awaited windows are sized to ~5 polls per interval).
@Test
public void testUpdatePollingInterval() {
    PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")),
        context -> Mono.just(new PollResponse<>(IN_PROGRESS, "PollOperation")),
        (context, response) -> Mono.just("Cancel"),
        context -> Mono.just("FinalResult"));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofMillis(55))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    pollerFlux.setPollInterval(Duration.ofMillis(50));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofMillis(255))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
    pollerFlux.setPollInterval(Duration.ofMillis(195));
    StepVerifier.create(pollerFlux.take(5))
        .thenAwait(Duration.ofSeconds(1))
        .expectNextCount(5)
        .expectComplete()
        .verify(STEPVERIFIER_TIMEOUT);
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void waitForCompletionSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    // Each poll takes 2 seconds — far longer than the 100ms timeout below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.delay(Duration.ofSeconds(2))
            .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.waitForCompletion(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
 * doesn't complete within the timeout period.
 */
@Test
public void waitForCompletionOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            // First poll responds promptly; subsequent polls stall past the timeout.
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        } else {
            return Mono.delay(Duration.ofSeconds(2))
                .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.waitForCompletion(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Tests that when a single poll takes longer than the timeout period, waitUntil does not throw and instead
 * returns the activation response (the last response received before timing out).
 */
@Test
public void waitUntilSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    // Each poll takes 2 seconds — far longer than the 100ms timeout below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.delay(Duration.ofSeconds(2))
            .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    // Unlike waitForCompletion, waitUntil swallows the timeout and returns the last known response.
    PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
    assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse());
}
/**
* Tests that the last received PollResponse is used when waitUntil times out.
*/
@Test
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void getFinalResultSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    // Each poll takes 2 seconds — far longer than the 100ms timeout below.
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored ->
        Mono.delay(Duration.ofSeconds(2))
            .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
 * doesn't complete within the timeout period.
 */
@Test
public void getFinalResultOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activationOperation
        = ignored -> Mono.just(activationResponse);
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            // First poll responds promptly; subsequent polls stall past the timeout.
            return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        } else {
            return Mono.delay(Duration.ofSeconds(2))
                .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
}
/**
 * Renders a throwable's full stack trace as a String, for use in assertion failure messages.
 *
 * @param throwable the throwable to render
 * @return the complete stack trace text
 */
private static String printException(Throwable throwable) {
    StringWriter stackTrace = new StringWriter();
    throwable.printStackTrace(new PrintWriter(stackTrace, true));
    return stackTrace.toString();
}
// Simple immutable payload type used as the poll-response value in these tests.
public static class Response {
    private final String response;

    /**
     * Creates a Response wrapping the given string.
     *
     * @param response the value to wrap
     */
    public Response(String response) {
        this.response = response;
    }

    // Returns the wrapped string value.
    public String getResponse() {
        return response;
    }

    @Override
    public String toString() {
        return "Response: " + response;
    }
}
// Simple result type used as the final LRO result in these tests.
public static class CertificateOutput {
    String name;

    /**
     * Creates a CertificateOutput with the given certificate name.
     *
     * @param certName the certificate name
     */
    public CertificateOutput(String certName) {
        name = certName;
    }

    // Returns the certificate name supplied at construction.
    public String getName() {
        return name;
    }
}
} |
The issue appears to be that, with low polling intervals, contention can cause this test to fail. Logically, the sleep should never complete before the timeout, but in some cases scheduling delays allow the poll to finish in time anyway, resulting in the test failing. | public void waitUntilOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
    invocationCount[0]++;
    if (invocationCount[0] == 0) {
        return expected;
    } else if (invocationCount[0] == 1) {
        // Sleep just long enough to guarantee the 1000ms waitUntil timeout elapses.
        // Previously this slept 10 seconds, which only slowed the test run without
        // making the timeout any more certain.
        try {
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before surfacing the failure.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
    } else {
        throw new RuntimeException("Poll should not be called more than twice");
    }
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
// waitUntil must not throw on timeout; it returns the last response received (the first poll's).
PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
    SUCCESSFULLY_COMPLETED));
assertEquals(expected.getValue().getResponse(), pollResponse.getValue().getResponse());
} | Thread.sleep(10000); | public void waitUntilOperationTimesOut() {
final Response activationResponse = new Response("Activated");
Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
    = ignored -> activationResponse;
int[] invocationCount = new int[1];
invocationCount[0] = -1;
PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
    invocationCount[0]++;
    if (invocationCount[0] == 0) {
        return expected;
    } else if (invocationCount[0] == 1) {
        // Sleep just long enough to guarantee the 1000ms waitUntil timeout elapses.
        // Previously this slept 10 seconds, which only slowed the test run without
        // making the timeout any more certain.
        try {
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before surfacing the failure.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
    } else {
        throw new RuntimeException("Poll should not be called more than twice");
    }
};
SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
    cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
    pollOperation, (ignored1, ignored2) -> null, ignored -> null);
// waitUntil must not throw on timeout; it returns the last response received (the first poll's).
PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(1000),
    SUCCESSFULLY_COMPLETED));
assertEquals(expected.getValue().getResponse(), pollResponse.getValue().getResponse());
} | class SimpleSyncPollerTests {
private static final Duration TEN_MILLIS = Duration.ofMillis(10);
// Verifies that when activation itself returns a terminal status, the SimpleSyncPoller
// never invokes the poll operation and waitForCompletion returns immediately.
@Test
public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
    int[] activationCallCount = new int[1];
    Function<PollingContext<Response>, PollResponse<Response>> activationOperationWithResponse = ignored -> {
        activationCallCount[0]++;
        return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("ActivationDone"));
    };
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        throw new RuntimeException("Polling shouldn't happen for synchronously completed activation.");
    };
    SyncPoller<Response, CertificateOutput> syncPoller = new SimpleSyncPoller<>(TEN_MILLIS,
        activationOperationWithResponse, pollOperation, (ignored1, ignore2) -> null, ignored -> null);
    try {
        PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
        assertEquals(1, activationCallCount[0]);
    } catch (Exception e) {
        fail("SyncPoller did not complete on activation", e);
    }
}
@Test
public void syncPollerConstructorPollIntervalZero() {
    // A zero poll interval is invalid; construction must be rejected immediately.
    assertThrows(IllegalArgumentException.class,
        () -> new SimpleSyncPoller<>(Duration.ZERO,
            context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null),
            context -> null, (context, response) -> null, context -> null));
}
@Test
public void syncPollerConstructorPollIntervalNegative() {
assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(-1),
cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
(ignored1, ignored2) -> null, ignored -> null));
}
    /** Verifies construction fails with {@link NullPointerException} when the poll interval is null. */
    @Test
    public void syncPollerConstructorPollIntervalNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(null,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the activation operation is null. */
    @Test
    public void syncConstructorActivationOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1), null,
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the poll operation is null. */
    @Test
    public void syncPollerConstructorPollOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null, (ignored1, ignored2) -> null,
            ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the cancel operation is null. */
    @Test
    public void syncPollerConstructorCancelOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, null,
            ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the fetch-result operation is null. */
    @Test
    public void syncPollerConstructorFetchResultOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, null));
    }
    /** Verifies that constructing a {@link SimpleSyncPoller} eagerly invokes the activation operation. */
    @Test
    public void syncPollerShouldCallActivationFromConstructor() {
        boolean[] activationCalled = new boolean[1];
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response>
            activationOperation = ignored -> {
            activationCalled[0] = true;
            return new Response("ActivationDone");
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null);
        // The flag must already be set here, before any poll or wait call.
        Assertions.assertTrue(activationCalled[0]);
    }
    /**
     * Verifies each poll sees the previous poll's response via {@link PollingContext#getLatestResponse()}: the poll
     * operation appends "A" to the latest value, so the response string grows by one "A" (and one "Response: " prefix
     * from {@code Response#toString()}) per poll.
     */
    @Test
    public void eachPollShouldReceiveLastPollResponse() {
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> new Response("A");
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = pollingContext -> {
            Assertions.assertNotNull(pollingContext.getActivationResponse());
            Assertions.assertNotNull(pollingContext.getLatestResponse());
            PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
            Assertions.assertNotNull(latestResponse);
            return new PollResponse<>(IN_PROGRESS, new Response(latestResponse.getValue().toString() + "A"),
                TEN_MILLIS);
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: AA"));
        pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: Response: AAA"));
        pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: Response: Response: AAAA"));
    }
    /**
     * Verifies {@code waitForCompletion} returns the terminal poll response, and that calling it again on a completed
     * poller returns the cached terminal response without polling again (the poll operation throws on a 4th call).
     */
    @Test
    public void waitForCompletionShouldReturnTerminalPollResponse() {
        PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
            new Response("2"), TEN_MILLIS);
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return response2;
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.waitForCompletion();
        Assertions.assertNotNull(pollResponse.getValue());
        assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
        assertEquals(response2.getValue().getResponse(), poller.waitForCompletion().getValue().getResponse());
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
    }
    /**
     * Verifies {@code getFinalResult} polls until the terminal status is reached (exactly three polls, indices 0-2)
     * and then invokes the fetch-result operation to produce the final value.
     */
    @Test
    public void getResultShouldPollUntilCompletionAndFetchResult() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                    new Response("2"), TEN_MILLIS);
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
            = ignored -> new CertificateOutput("cert1");
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
        CertificateOutput certificateOutput = poller.getFinalResult();
        Assertions.assertNotNull(certificateOutput);
        assertEquals("cert1", certificateOutput.getName());
        assertEquals(2, invocationCount[0]);
    }
    /**
     * Verifies {@code getFinalResult} on an already-completed poller fetches the result without polling again (the
     * poll operation throws if called a 4th time).
     */
    @Test
    public void getResultShouldNotPollOnCompletedPoller() {
        PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
            new Response("2"), TEN_MILLIS);
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
            = ignored -> new CertificateOutput("cert1");
        int[] invocationCount = new int[] { -1 };
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return response2;
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
        PollResponse<Response> pollResponse = poller.waitForCompletion();
        Assertions.assertNotNull(pollResponse.getValue());
        assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
        CertificateOutput certificateOutput = poller.getFinalResult();
        Assertions.assertNotNull(certificateOutput);
        assertEquals("cert1", certificateOutput.getName());
    }
    /**
     * Verifies {@code waitUntil} stops polling once the (non-terminal) status to wait for is observed: the third poll
     * (index 2) returns the matching status and no further polls occur within this call.
     */
    @Test
    public void waitUntilShouldPollAfterMatchingStatus() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        LongRunningOperationStatus matchStatus
            = LongRunningOperationStatus.fromString("OTHER_1", false);
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return new PollResponse<>(matchStatus, new Response("1"), TEN_MILLIS);
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
        assertEquals(matchStatus, pollResponse.getStatus());
        assertEquals(2, invocationCount[0]);
    }
    /**
     * Verifies a {@link RuntimeException} thrown by the poll operation (on its third invocation) propagates out of
     * {@code getFinalResult} with its message intact.
     */
    @Test
    public void verifyExceptionPropagationFromPollingOperationSyncPoller() {
        final Response activationResponse = new Response("Foo");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        final AtomicReference<Integer> cnt = new AtomicReference<>(0);
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            cnt.getAndSet(cnt.get() + 1);
            if (cnt.get() <= 2) {
                return new PollResponse<>(IN_PROGRESS, new Response("1"));
            } else if (cnt.get() == 3) {
                throw new RuntimeException("Polling operation failed!");
            } else if (cnt.get() == 4) {
                return new PollResponse<>(IN_PROGRESS, new Response("2"));
            } else {
                return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"));
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult);
        assertTrue(exception.getMessage().contains("Polling operation failed!"));
    }
    /**
     * Verifies an error {@code PollerFlux} delivers the exact exception instance to the subscriber's error handler
     * and emits neither values nor completion.
     */
    @Test
    public void testPollerFluxError() throws InterruptedException {
        IllegalArgumentException expectedException = new IllegalArgumentException();
        PollerFlux<String, String> pollerFlux = error(expectedException);
        CountDownLatch countDownLatch = new CountDownLatch(1);
        pollerFlux.subscribe(
            response -> Assertions.fail("Did not expect a response"),
            ex -> {
                countDownLatch.countDown();
                Assertions.assertSame(expectedException, ex);
            },
            () -> Assertions.fail("Did not expect the flux to complete")
        );
        // Bound the wait so a missing error signal fails the test instead of hanging it.
        boolean completed = countDownLatch.await(1, TimeUnit.SECONDS);
        Assertions.assertTrue(completed);
    }
    /**
     * Verifies {@code waitUntil} polls to completion when waiting for the terminal status, and that a second
     * {@code waitUntil} on the completed poller returns without polling again (count stays at 2).
     */
    @Test
    public void waitUntilShouldPollToCompletion() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        LongRunningOperationStatus matchStatus = SUCCESSFULLY_COMPLETED;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("2"), TEN_MILLIS);
                default: throw new RuntimeException("Poll should not be called after matching response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
        assertEquals(matchStatus, pollResponse.getStatus());
        assertEquals(matchStatus, poller.waitUntil(matchStatus).getStatus());
        assertEquals(2, invocationCount[0]);
    }
    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
     * than the timeout period.
     */
    @Test
    public void waitForCompletionSinglePollTimesOut() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                try {
                    // Sleep well past the 100ms waitForCompletion timeout so the single poll exceeds it.
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
            }
            throw new RuntimeException("Poll should not be called more than once");
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        RuntimeException exception = assertThrows(RuntimeException.class,
            () -> poller.waitForCompletion(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause());
    }
    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
     * doesn't complete within the timeout period.
     */
    @Test
    public void waitForCompletionOperationTimesOut() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                // First poll succeeds quickly; the second poll is the one that exceeds the timeout.
                return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
            } else if (invocationCount[0] == 1) {
                try {
                    // Sleep well past the 100ms waitForCompletion timeout.
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
            } else {
                throw new RuntimeException("Poll should not be called more than twice");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        RuntimeException exception = assertThrows(RuntimeException.class,
            () -> poller.waitForCompletion(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause());
    }
    /**
     * Tests that {@code waitUntil} returns the activation response, rather than throwing, when the single in-flight
     * poll takes longer than the timeout period.
     * (Previous javadoc claimed a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown, but the
     * assertions below verify no exception: the latest known response — the activation response — is returned.)
     */
    @Test
    public void waitUntilSinglePollTimesOut() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<Response>, Response> activationOperation = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                try {
                    // Sleep well past the 100ms waitUntil timeout so the poll never completes in time.
                    Thread.sleep(2000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
            }
            throw new RuntimeException("Poll should not be called more than once");
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
        assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse());
    }
/**
* Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
* doesn't reach the {@code statusToWaitFor} within the timeout period.
*/
@Test
    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
     * than the timeout period.
     */
    @Test
    public void getFinalResultSinglePollTimesOut() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                try {
                    // Sleep well past the 100ms getFinalResult timeout so the single poll exceeds it.
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
            }
            throw new RuntimeException("Poll should not be called more than once");
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        RuntimeException exception = assertThrows(RuntimeException.class,
            () -> poller.getFinalResult(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause());
    }
    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
     * doesn't complete within the timeout period.
     */
    @Test
    public void getFinalResultOperationTimesOut() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                // First poll succeeds quickly; the second poll is the one that exceeds the timeout.
                return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
            } else if (invocationCount[0] == 1) {
                try {
                    // Sleep well past the 100ms getFinalResult timeout.
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
            } else {
                throw new RuntimeException("Poll should not be called more than twice");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        RuntimeException exception = assertThrows(RuntimeException.class,
            () -> poller.getFinalResult(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause());
    }
    /** Simple immutable value type used as the intermediate poll payload in these tests. */
    public static class Response {
        private final String response;
        public Response(String response) {
            this.response = response;
        }
        public String getResponse() {
            return response;
        }
        @Override
        public String toString() {
            // The "Response: " prefix is significant: tests assert on strings built from this representation.
            return "Response: " + response;
        }
    }
    /** Simple named value type used as the final LRO result in these tests. */
    public static class CertificateOutput {
        String name;
        public CertificateOutput(String certName) {
            name = certName;
        }
        public String getName() {
            return name;
        }
    }
} | class SimpleSyncPollerTests {
private static final Duration TEN_MILLIS = Duration.ofMillis(10);
    /**
     * Verifies that the poll operation is never invoked when the activation operation itself returns a terminal
     * (SUCCESSFULLY_COMPLETED) response — the poller must complete on activation alone.
     */
    @Test
    public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
        int[] activationCallCount = new int[1];
        Function<PollingContext<Response>, PollResponse<Response>> activationOperationWithResponse = ignored -> {
            activationCallCount[0]++;
            return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("ActivationDone"));
        };
        // Any poll is a test failure; activation already completed the operation.
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            throw new RuntimeException("Polling shouldn't happen for synchronously completed activation.");
        };
        SyncPoller<Response, CertificateOutput> syncPoller = new SimpleSyncPoller<>(TEN_MILLIS,
            activationOperationWithResponse, pollOperation, (ignored1, ignore2) -> null, ignored -> null);
        try {
            PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
            assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
            assertEquals(1, activationCallCount[0]);
        } catch (Exception e) {
            fail("SyncPoller did not complete on activation", e);
        }
    }
    /** Verifies construction fails with {@link IllegalArgumentException} when the poll interval is zero. */
    @Test
    public void syncPollerConstructorPollIntervalZero() {
        assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ZERO,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }
    /** Verifies construction fails with {@link IllegalArgumentException} when the poll interval is negative. */
    @Test
    public void syncPollerConstructorPollIntervalNegative() {
        assertThrows(IllegalArgumentException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(-1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the poll interval is null. */
    @Test
    public void syncPollerConstructorPollIntervalNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(null,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the activation operation is null. */
    @Test
    public void syncConstructorActivationOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1), null,
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the poll operation is null. */
    @Test
    public void syncPollerConstructorPollOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null, (ignored1, ignored2) -> null,
            ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the cancel operation is null. */
    @Test
    public void syncPollerConstructorCancelOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, null,
            ignored -> null));
    }
    /** Verifies construction fails with {@link NullPointerException} when the fetch-result operation is null. */
    @Test
    public void syncPollerConstructorFetchResultOperationNull() {
        assertThrows(NullPointerException.class, () -> new SimpleSyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, null));
    }
    /** Verifies that constructing a {@link SimpleSyncPoller} eagerly invokes the activation operation. */
    @Test
    public void syncPollerShouldCallActivationFromConstructor() {
        boolean[] activationCalled = new boolean[1];
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response>
            activationOperation = ignored -> {
            activationCalled[0] = true;
            return new Response("ActivationDone");
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null);
        // The flag must already be set here, before any poll or wait call.
        Assertions.assertTrue(activationCalled[0]);
    }
    /**
     * Verifies each poll sees the previous poll's response via {@link PollingContext#getLatestResponse()}: the poll
     * operation appends "A" to the latest value, so the response string grows by one "A" (and one "Response: " prefix
     * from {@code Response#toString()}) per poll.
     */
    @Test
    public void eachPollShouldReceiveLastPollResponse() {
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> new Response("A");
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = pollingContext -> {
            Assertions.assertNotNull(pollingContext.getActivationResponse());
            Assertions.assertNotNull(pollingContext.getLatestResponse());
            PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
            Assertions.assertNotNull(latestResponse);
            return new PollResponse<>(IN_PROGRESS, new Response(latestResponse.getValue().toString() + "A"),
                TEN_MILLIS);
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: AA"));
        pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: Response: AAA"));
        pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: Response: Response: AAAA"));
    }
    /**
     * Verifies {@code waitForCompletion} returns the terminal poll response, and that calling it again on a completed
     * poller returns the cached terminal response without polling again (the poll operation throws on a 4th call).
     */
    @Test
    public void waitForCompletionShouldReturnTerminalPollResponse() {
        PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
            new Response("2"), TEN_MILLIS);
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return response2;
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.waitForCompletion();
        Assertions.assertNotNull(pollResponse.getValue());
        assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
        assertEquals(response2.getValue().getResponse(), poller.waitForCompletion().getValue().getResponse());
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
    }
    /**
     * Verifies {@code getFinalResult} polls until the terminal status is reached (exactly three polls, indices 0-2)
     * and then invokes the fetch-result operation to produce the final value.
     */
    @Test
    public void getResultShouldPollUntilCompletionAndFetchResult() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                    new Response("2"), TEN_MILLIS);
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
            = ignored -> new CertificateOutput("cert1");
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
        CertificateOutput certificateOutput = poller.getFinalResult();
        Assertions.assertNotNull(certificateOutput);
        assertEquals("cert1", certificateOutput.getName());
        assertEquals(2, invocationCount[0]);
    }
    /**
     * Verifies {@code getFinalResult} on an already-completed poller fetches the result without polling again (the
     * poll operation throws if called a 4th time).
     */
    @Test
    public void getResultShouldNotPollOnCompletedPoller() {
        PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
            new Response("2"), TEN_MILLIS);
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        Function<PollingContext<Response>, CertificateOutput> fetchResultOperation
            = ignored -> new CertificateOutput("cert1");
        int[] invocationCount = new int[] { -1 };
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return response2;
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);
        PollResponse<Response> pollResponse = poller.waitForCompletion();
        Assertions.assertNotNull(pollResponse.getValue());
        assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
        CertificateOutput certificateOutput = poller.getFinalResult();
        Assertions.assertNotNull(certificateOutput);
        assertEquals("cert1", certificateOutput.getName());
    }
    /**
     * Verifies {@code waitUntil} stops polling once the (non-terminal) status to wait for is observed: the third poll
     * (index 2) returns the matching status and no further polls occur within this call.
     */
    @Test
    public void waitUntilShouldPollAfterMatchingStatus() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        LongRunningOperationStatus matchStatus
            = LongRunningOperationStatus.fromString("OTHER_1", false);
        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0: return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
                case 1: return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
                case 2: return new PollResponse<>(matchStatus, new Response("1"), TEN_MILLIS);
                default: throw new RuntimeException("Poll should not be called after terminal response");
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        PollResponse<Response> pollResponse = poller.waitUntil(matchStatus);
        assertEquals(matchStatus, pollResponse.getStatus());
        assertEquals(2, invocationCount[0]);
    }
    /**
     * Verifies a {@link RuntimeException} thrown by the poll operation (on its third invocation) propagates out of
     * {@code getFinalResult} with its message intact.
     */
    @Test
    public void verifyExceptionPropagationFromPollingOperationSyncPoller() {
        final Response activationResponse = new Response("Foo");
        Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
            = ignored -> activationResponse;
        final AtomicReference<Integer> cnt = new AtomicReference<>(0);
        Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
            cnt.getAndSet(cnt.get() + 1);
            if (cnt.get() <= 2) {
                return new PollResponse<>(IN_PROGRESS, new Response("1"));
            } else if (cnt.get() == 3) {
                throw new RuntimeException("Polling operation failed!");
            } else if (cnt.get() == 4) {
                return new PollResponse<>(IN_PROGRESS, new Response("2"));
            } else {
                return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"));
            }
        };
        SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);
        RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult);
        assertTrue(exception.getMessage().contains("Polling operation failed!"));
    }
/**
 * Tests that a {@link PollerFlux} created via {@code error(...)} emits exactly the supplied
 * exception to its subscriber.
 */
@Test
public void testPollerFluxError() throws InterruptedException {
    IllegalArgumentException expectedException = new IllegalArgumentException();
    PollerFlux<String, String> pollerFlux = error(expectedException);
    CountDownLatch countDownLatch = new CountDownLatch(1);
    // Capture the error and assert on the test thread: an assertion failure thrown inside the
    // subscriber's error consumer runs on the pipeline's thread and can be swallowed instead of
    // failing the test.
    AtomicReference<Throwable> caught = new AtomicReference<>();
    pollerFlux.subscribe(
        response -> Assertions.fail("Did not expect a response"),
        ex -> {
            caught.set(ex);
            countDownLatch.countDown();
        },
        () -> Assertions.fail("Did not expect the flux to complete")
    );
    boolean completed = countDownLatch.await(1, TimeUnit.SECONDS);
    Assertions.assertTrue(completed);
    Assertions.assertSame(expectedException, caught.get());
}
@Test
public void waitUntilShouldPollToCompletion() {
    final Response activated = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activation
        = ignored -> activated;
    LongRunningOperationStatus targetStatus = SUCCESSFULLY_COMPLETED;
    int[] pollCount = {-1};
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        pollCount[0]++;
        if (pollCount[0] == 0) {
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        }
        if (pollCount[0] == 1) {
            return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
        }
        if (pollCount[0] == 2) {
            return new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("2"), TEN_MILLIS);
        }
        throw new RuntimeException("Poll should not be called after matching response");
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    // First wait drives the poller to the terminal state...
    PollResponse<Response> pollResponse = poller.waitUntil(targetStatus);
    assertEquals(targetStatus, pollResponse.getStatus());
    // ...and the second wait returns the cached terminal response without polling again.
    assertEquals(targetStatus, poller.waitUntil(targetStatus).getStatus());
    assertEquals(2, pollCount[0]);
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void waitForCompletionSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            try {
                // These are local tests; sleeping just past the 100ms timeout below keeps the
                // suite fast while still guaranteeing the timeout fires.
                Thread.sleep(300);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        }
        throw new RuntimeException("Poll should not be called more than once");
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.waitForCompletion(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
 * doesn't complete within the timeout period.
 */
@Test
public void waitForCompletionOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        } else if (invocationCount[0] == 1) {
            try {
                // Local test: a short sleep just past the 100ms timeout below is sufficient to
                // guarantee the second poll cannot finish in time, and keeps the test fast.
                Thread.sleep(300);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
        } else {
            throw new RuntimeException("Poll should not be called more than twice");
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.waitForCompletion(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
 * Tests that {@code waitUntil} returns the activation response, rather than throwing, if a single poll takes
 * longer than the timeout period. (The previous javadoc claimed a wrapped {@link java.util.concurrent.TimeoutException}
 * is thrown, which contradicted the assertion below — waitUntil returns the last known response on timeout.)
 */
@Test
public void waitUntilSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<Response>, Response> activationOperation = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            try {
                // Local test: sleeping just past the 100ms timeout below keeps the test fast
                // while still guaranteeing the poll cannot finish in time.
                Thread.sleep(300);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        }
        throw new RuntimeException("Poll should not be called more than once");
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED);
    assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse());
}
/**
 * Tests that {@code waitUntil} returns the last completed poll response, rather than throwing, if the polling
 * operation doesn't reach the {@code statusToWaitFor} within the timeout period.
 */
@Test
public void waitUntilOperationTimesOut() {
    // NOTE(review): this method body was missing here, leaving two stacked @Test annotations on
    // the next method (@Test is not repeatable — compile error). Restored with a short sleep:
    // these are local tests, so no multi-second sleeps are needed.
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    PollResponse<Response> expected = new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return expected;
        } else if (invocationCount[0] == 1) {
            try {
                // Sleep just past the 100ms timeout so the second poll cannot complete in time.
                Thread.sleep(300);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
        } else {
            throw new RuntimeException("Poll should not be called more than twice");
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    PollResponse<Response> pollResponse = assertDoesNotThrow(() -> poller.waitUntil(Duration.ofMillis(100),
        SUCCESSFULLY_COMPLETED));
    assertEquals(expected.getValue().getResponse(), pollResponse.getValue().getResponse());
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
 * than the timeout period.
 */
@Test
public void getFinalResultSinglePollTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            try {
                // Short sleep just past the 100ms timeout below keeps this local test fast.
                Thread.sleep(300);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        }
        throw new RuntimeException("Poll should not be called more than once");
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause());
}
/**
 * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
 * doesn't complete within the timeout period.
 */
@Test
public void getFinalResultOperationTimesOut() {
    final Response activationResponse = new Response("Activated");
    Function<PollingContext<SimpleSyncPollerTests.Response>, SimpleSyncPollerTests.Response> activationOperation
        = ignored -> activationResponse;
    int[] invocationCount = new int[1];
    invocationCount[0] = -1;
    Function<PollingContext<Response>, PollResponse<Response>> pollOperation = ignored -> {
        invocationCount[0]++;
        if (invocationCount[0] == 0) {
            return new PollResponse<>(IN_PROGRESS, new Response("0"), TEN_MILLIS);
        } else if (invocationCount[0] == 1) {
            try {
                // Local test: a short sleep just past the 100ms timeout below guarantees the
                // second poll cannot finish in time, and keeps the test fast.
                Thread.sleep(300);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
                throw new RuntimeException(e);
            }
            return new PollResponse<>(IN_PROGRESS, new Response("1"), TEN_MILLIS);
        } else {
            throw new RuntimeException("Poll should not be called more than twice");
        }
    };
    SyncPoller<Response, CertificateOutput> poller = new SimpleSyncPoller<>(TEN_MILLIS,
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt)),
        pollOperation, (ignored1, ignored2) -> null, ignored -> null);
    RuntimeException exception = assertThrows(RuntimeException.class,
        () -> poller.getFinalResult(Duration.ofMillis(100)));
    assertInstanceOf(TimeoutException.class, exception.getCause());
}
/** Simple immutable payload carried through the poller in these tests. */
public static class Response {
    private final String value;

    public Response(String response) {
        this.value = response;
    }

    /** Returns the wrapped response string. */
    public String getResponse() {
        return value;
    }

    @Override
    public String toString() {
        return String.format("Response: %s", value);
    }
}
/** Test model representing the final result of the polled operation. */
public static class CertificateOutput {
    // Tightened from a mutable package-private field to private final: the visible tests only
    // read it through getName(), and the value never changes after construction.
    private final String name;

    public CertificateOutput(String certName) {
        this.name = certName;
    }

    /** Returns the certificate name supplied at construction. */
    public String getName() {
        return name;
    }
}
} |
To make sure I understand correctly: inside the async client, a tryTimeout is already applied when performing this operation — so applying another timeout here would duplicate it?
return producer.getEventHubProperties().block();
} | return producer.getEventHubProperties().block(); | public EventHubProperties getEventHubProperties() {
return producer.getEventHubProperties().block();
} | class EventHubProducerClient implements Closeable {
private final EventHubProducerAsyncClient producer;
/**
* Creates a new instance of {@link EventHubProducerClient} that sends messages to an Azure Event Hub.
*
* @throws NullPointerException if {@code producer} or {@code tryTimeout} is null.
*/
EventHubProducerClient(EventHubProducerAsyncClient producer) {
this.producer = Objects.requireNonNull(producer, "'producer' cannot be null.");
}
/**
* Gets the Event Hub name this client interacts with.
*
* @return The Event Hub name this client interacts with.
*/
public String getEventHubName() {
return producer.getEventHubName();
}
/**
* Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Event Hubs namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return producer.getFullyQualifiedNamespace();
}
/**
* Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
*
* @return The set of information for the Event Hub that this client is associated with.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Retrieves the identifiers for the partitions of an Event Hub.
*
* @return A Flux of identifiers for the partitions of an Event Hub.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public IterableStream<String> getPartitionIds() {
return new IterableStream<>(producer.getPartitionIds());
}
/**
* Retrieves information about a specific partition for an Event Hub, including elements that describe the available
* events in the partition event stream.
*
* @param partitionId The unique identifier of a partition associated with the Event Hub.
* @return The set of information for the requested partition under the Event Hub this client is associated with.
* @throws NullPointerException if {@code partitionId} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PartitionProperties getPartitionProperties(String partitionId) {
return producer.getPartitionProperties(partitionId).block();
}
/**
* Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
*
* @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch() {
return producer.createBatch().block();
}
/**
* Creates an {@link EventDataBatch} configured with the options specified.
*
* @param options A set of options used to configure the {@link EventDataBatch}.
*
* @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
*
* @throws NullPointerException if {@code options} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch(CreateBatchOptions options) {
return producer.createBatch(options).block();
}
/**
* Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
* allowed, an exception will be triggered and the send will fail.
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param event Event to send to the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event) {
producer.send(event).block();
}
/**
* Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
* the maximum size allowed, an exception will be triggered and the send will fail.
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param event Event to send to the service.
* @param options The set of options to consider when sending this event.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event, SendOptions options) {
producer.send(event, options).block();
}
/**
* <p>Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
* maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
* size is the max amount allowed on the link.</p>
*
* <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send
* <pre>
* List<EventData> events = Arrays.asList&
* new EventData&
* producer.send&
* </pre>
* <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param events Events to send to the service.
* @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events) {
producer.send(events).block();
}
/**
* <p>Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
* maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
* size is the max amount allowed on the link.</p>
*
* <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send
* <pre>
* TokenCredential credential = new DefaultAzureCredentialBuilder&
*
* EventHubProducerClient producer = new EventHubClientBuilder&
* .credential&
* credential&
* .buildProducerClient&
*
* List<EventData> events = Arrays.asList&
* new EventData&
*
* SendOptions sendOptions = new SendOptions&
* producer.send&
* </pre>
* <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param events Events to send to the service.
* @param options The set of options to consider when sending this batch.
* @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events, SendOptions options) {
producer.send(events, options).block();
}
/**
* Sends the batch to the associated Event Hub.
*
* @param batch The batch to send to the service.
* @throws NullPointerException if {@code batch} is {@code null}.
* @see EventHubProducerClient
* @see EventHubProducerClient
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(EventDataBatch batch) {
producer.send(batch).block();
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
producer.close();
}
/**
* Gets the client identifier.
*
* @return The unique identifier string for current client.
*/
public String getIdentifier() {
return producer.getIdentifier();
}
} | class EventHubProducerClient implements Closeable {
private final EventHubProducerAsyncClient producer;
/**
* Creates a new instance of {@link EventHubProducerClient} that sends messages to an Azure Event Hub.
*
* @throws NullPointerException if {@code producer} or {@code tryTimeout} is null.
*/
EventHubProducerClient(EventHubProducerAsyncClient producer) {
this.producer = Objects.requireNonNull(producer, "'producer' cannot be null.");
}
/**
* Gets the Event Hub name this client interacts with.
*
* @return The Event Hub name this client interacts with.
*/
public String getEventHubName() {
return producer.getEventHubName();
}
/**
* Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Event Hubs namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return producer.getFullyQualifiedNamespace();
}
/**
* Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
*
* @return The set of information for the Event Hub that this client is associated with.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Retrieves the identifiers for the partitions of an Event Hub.
*
* @return A Flux of identifiers for the partitions of an Event Hub.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public IterableStream<String> getPartitionIds() {
return new IterableStream<>(producer.getPartitionIds());
}
/**
* Retrieves information about a specific partition for an Event Hub, including elements that describe the available
* events in the partition event stream.
*
* @param partitionId The unique identifier of a partition associated with the Event Hub.
* @return The set of information for the requested partition under the Event Hub this client is associated with.
* @throws NullPointerException if {@code partitionId} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PartitionProperties getPartitionProperties(String partitionId) {
return producer.getPartitionProperties(partitionId).block();
}
/**
* Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
*
* @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch() {
return producer.createBatch().block();
}
/**
* Creates an {@link EventDataBatch} configured with the options specified.
*
* @param options A set of options used to configure the {@link EventDataBatch}.
*
* @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
*
* @throws NullPointerException if {@code options} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch(CreateBatchOptions options) {
return producer.createBatch(options).block();
}
/**
* Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
* allowed, an exception will be triggered and the send will fail.
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param event Event to send to the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event) {
producer.send(event).block();
}
/**
* Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
* the maximum size allowed, an exception will be triggered and the send will fail.
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param event Event to send to the service.
* @param options The set of options to consider when sending this event.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event, SendOptions options) {
producer.send(event, options).block();
}
/**
* <p>Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
* maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
* size is the max amount allowed on the link.</p>
*
* <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send
* <pre>
* List<EventData> events = Arrays.asList&
* new EventData&
* producer.send&
* </pre>
* <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param events Events to send to the service.
* @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events) {
producer.send(events).block();
}
/**
* <p>Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
* maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
* size is the max amount allowed on the link.</p>
*
* <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send
* <pre>
* TokenCredential credential = new DefaultAzureCredentialBuilder&
*
* EventHubProducerClient producer = new EventHubClientBuilder&
* .credential&
* credential&
* .buildProducerClient&
*
* List<EventData> events = Arrays.asList&
* new EventData&
*
* SendOptions sendOptions = new SendOptions&
* producer.send&
* </pre>
* <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param events Events to send to the service.
* @param options The set of options to consider when sending this batch.
* @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events, SendOptions options) {
producer.send(events, options).block();
}
/**
* Sends the batch to the associated Event Hub.
*
* @param batch The batch to send to the service.
* @throws NullPointerException if {@code batch} is {@code null}.
* @see EventHubProducerClient
* @see EventHubProducerClient
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(EventDataBatch batch) {
producer.send(batch).block();
}
/**
* {@inheritDoc}
*/
@Override
public void close() {
producer.close();
}
/**
* Gets the client identifier.
*
* @return The unique identifier string for current client.
*/
public String getIdentifier() {
return producer.getIdentifier();
}
} |
Yes — the async client already applies the retry policy (and checks for timeouts). I can't validate all possible branches, but `getEventHubProperties` uses the management channel https://github.com/Azure/azure-sdk-for-java/blob/c74bb20a471d81fea8ac5b5afb8343eafd4c09f6/sdk/eventhubs/azure-messaging-eventhubs/src/main/java/com/azure/messaging/eventhubs/implementation/ManagementChannel.java#L161 which uses `RetryUtil.withRetry` underneath https://github.com/Azure/azure-sdk-for-java/blob/ac66fd5bbf8e65590d0121cdec721eafdc98843e/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/implementation/RequestResponseChannel.java#L340 https://github.com/Azure/azure-sdk-for-java/blob/ac66fd5bbf8e65590d0121cdec721eafdc98843e/sdk/core/azure-core-amqp/src/main/java/com/azure/core/amqp/implementation/RetryUtil.java#L68-L73 Getting the management channel, creating the session, etc. are also guarded with timeouts, so if a timeout happens, the call is retried based on the retry policy. E.g.: the first `sendWithAck` times out after 30 sec, and the next one succeeds. Since we had a timeout on the sync client, the second try never had a chance to happen — the operation was cancelled without applying retries. So it's not a duplication: the sync-side timeout actually prevents the retry policy from handling transient timeouts.
return producer.getEventHubProperties().block();
} | return producer.getEventHubProperties().block(); | public EventHubProperties getEventHubProperties() {
return producer.getEventHubProperties().block();
} | class EventHubProducerClient implements Closeable {
private final EventHubProducerAsyncClient producer;
/**
* Creates a new instance of {@link EventHubProducerClient} that sends messages to an Azure Event Hub.
*
* @throws NullPointerException if {@code producer} or {@code tryTimeout} is null.
*/
EventHubProducerClient(EventHubProducerAsyncClient producer) {
this.producer = Objects.requireNonNull(producer, "'producer' cannot be null.");
}
/**
* Gets the Event Hub name this client interacts with.
*
* @return The Event Hub name this client interacts with.
*/
public String getEventHubName() {
return producer.getEventHubName();
}
/**
* Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Event Hubs namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return producer.getFullyQualifiedNamespace();
}
/**
* Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
*
* @return The set of information for the Event Hub that this client is associated with.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Retrieves the identifiers for the partitions of an Event Hub.
*
* @return A Flux of identifiers for the partitions of an Event Hub.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public IterableStream<String> getPartitionIds() {
return new IterableStream<>(producer.getPartitionIds());
}
/**
* Retrieves information about a specific partition for an Event Hub, including elements that describe the available
* events in the partition event stream.
*
* @param partitionId The unique identifier of a partition associated with the Event Hub.
* @return The set of information for the requested partition under the Event Hub this client is associated with.
* @throws NullPointerException if {@code partitionId} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PartitionProperties getPartitionProperties(String partitionId) {
return producer.getPartitionProperties(partitionId).block();
}
/**
* Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
*
* @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch() {
return producer.createBatch().block();
}
/**
* Creates an {@link EventDataBatch} configured with the options specified.
*
* @param options A set of options used to configure the {@link EventDataBatch}.
*
* @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
*
* @throws NullPointerException if {@code options} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch(CreateBatchOptions options) {
return producer.createBatch(options).block();
}
/**
* Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
* allowed, an exception will be triggered and the send will fail.
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param event Event to send to the service.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event) {
producer.send(event).block();
}
/**
* Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
* the maximum size allowed, an exception will be triggered and the send will fail.
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param event Event to send to the service.
* @param options The set of options to consider when sending this event.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event, SendOptions options) {
producer.send(event, options).block();
}
/**
* <p>Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the
* maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message
* size is the max amount allowed on the link.</p>
*
* <!-- src_embed com.azure.messaging.eventhubs.eventhubproducerclient.send
* <pre>
* List<EventData> events = Arrays.asList&
* new EventData&
* producer.send&
* </pre>
* <!-- end com.azure.messaging.eventhubs.eventhubproducerclient.send
*
* <p>
* For more information regarding the maximum event size allowed, see
* <a href="https:
* Limits</a>.
* </p>
*
* @param events Events to send to the service.
* @throws AmqpException if the size of {@code events} exceed the maximum size of a single batch.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events) {
    // Delegates to the async producer and blocks until the batched send completes.
    producer.send(events).block();
}

/**
 * Sends a set of events to the associated Event Hub using a batched approach, honoring the supplied
 * {@link SendOptions} (for example a partition id or partition key). If the size of the events exceeds
 * the maximum size of a single batch, an exception is triggered and the send fails. By default, the
 * message size is the maximum amount allowed on the link.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param events Events to send to the service.
 * @param options The set of options to consider when sending this batch.
 * @throws AmqpException if the size of {@code events} exceeds the maximum size of a single batch.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events, SendOptions options) {
    producer.send(events, options).block();
}

/**
 * Sends the batch to the associated Event Hub, blocking until the service acknowledges it.
 *
 * @param batch The batch to send to the service.
 * @throws NullPointerException if {@code batch} is {@code null}.
 * @see EventHubProducerClient#createBatch()
 * @see EventHubProducerClient#createBatch(CreateBatchOptions)
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(EventDataBatch batch) {
    producer.send(batch).block();
}

/**
 * {@inheritDoc}
 */
@Override
public void close() {
    // Closing the underlying async producer releases the resources owned by this client.
    producer.close();
}

/**
 * Gets the client identifier.
 *
 * @return The unique identifier string for current client.
 */
public String getIdentifier() {
    return producer.getIdentifier();
}
} | class EventHubProducerClient implements Closeable {
// The async client every synchronous operation in this class delegates (and blocks) on.
private final EventHubProducerAsyncClient producer;

/**
 * Creates a new instance of {@link EventHubProducerClient} that sends messages to an Azure Event Hub.
 * All synchronous operations delegate to {@code producer} and block on the result.
 *
 * @throws NullPointerException if {@code producer} is null.
 */
EventHubProducerClient(EventHubProducerAsyncClient producer) {
    this.producer = Objects.requireNonNull(producer, "'producer' cannot be null.");
}
/**
 * Gets the Event Hub name this client interacts with.
 *
 * @return The Event Hub name this client interacts with.
 */
public String getEventHubName() {
    // Straight delegation; the async client owns the connection metadata.
    return producer.getEventHubName();
}

/**
 * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to
 * {@code {yournamespace}.servicebus.windows.net}.
 *
 * @return The fully qualified Event Hubs namespace that the connection is associated with.
 */
public String getFullyQualifiedNamespace() {
    return producer.getFullyQualifiedNamespace();
}
/**
 * Retrieves information about an Event Hub, including the number of partitions present and their identifiers.
 *
 * @return The set of information for the Event Hub that this client is associated with.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public EventHubProperties getEventHubProperties() {
    // FIX: the @ServiceMethod annotation above had no method attached (a compile error); restored the
    // blocking delegation that matches every other synchronous method in this class.
    return producer.getEventHubProperties().block();
}

/**
 * Retrieves the identifiers for the partitions of an Event Hub.
 *
 * @return An {@link IterableStream} of identifiers for the partitions of an Event Hub.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public IterableStream<String> getPartitionIds() {
    return new IterableStream<>(producer.getPartitionIds());
}
/**
 * Retrieves information about a specific partition for an Event Hub, including elements that describe the available
 * events in the partition event stream.
 *
 * @param partitionId The unique identifier of a partition associated with the Event Hub.
 * @return The set of information for the requested partition under the Event Hub this client is associated with.
 * @throws NullPointerException if {@code partitionId} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PartitionProperties getPartitionProperties(String partitionId) {
    return producer.getPartitionProperties(partitionId).block();
}

/**
 * Creates an {@link EventDataBatch} that can fit as many events as the transport allows.
 *
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch() {
    return producer.createBatch().block();
}

/**
 * Creates an {@link EventDataBatch} configured with the options specified.
 *
 * @param options A set of options used to configure the {@link EventDataBatch}.
 *
 * @return A new {@link EventDataBatch} that can fit as many events as the transport allows.
 *
 * @throws NullPointerException if {@code options} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public EventDataBatch createBatch(CreateBatchOptions options) {
    return producer.createBatch(options).block();
}
/**
 * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size
 * allowed, an exception will be triggered and the send will fail.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param event Event to send to the service.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event) {
    // Package-private single-event convenience; blocks on the async send.
    producer.send(event).block();
}

/**
 * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds
 * the maximum size allowed, an exception will be triggered and the send will fail.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param event Event to send to the service.
 * @param options The set of options to consider when sending this event.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
void send(EventData event, SendOptions options) {
    producer.send(event, options).block();
}
/**
 * Sends a set of events to the associated Event Hub using a batched approach. If the size of the events exceeds
 * the maximum size of a single batch, an exception will be triggered and the send will fail. By default, the
 * message size is the maximum amount allowed on the link.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param events Events to send to the service.
 * @throws AmqpException if the size of {@code events} exceeds the maximum size of a single batch.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events) {
    producer.send(events).block();
}

/**
 * Sends a set of events to the associated Event Hub using a batched approach, honoring the supplied
 * {@link SendOptions}. If the size of the events exceeds the maximum size of a single batch, an exception
 * will be triggered and the send will fail. By default, the message size is the maximum amount allowed on
 * the link.
 *
 * <p>For more information regarding the maximum event size allowed, see
 * <a href="https://docs.microsoft.com/azure/event-hubs/event-hubs-quotas">Azure Event Hubs Quotas and
 * Limits</a>.</p>
 *
 * @param events Events to send to the service.
 * @param options The set of options to consider when sending this batch.
 * @throws AmqpException if the size of {@code events} exceeds the maximum size of a single batch.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(Iterable<EventData> events, SendOptions options) {
    producer.send(events, options).block();
}
/**
 * Sends the batch to the associated Event Hub, blocking until the service acknowledges it.
 *
 * @param batch The batch to send to the service.
 * @throws NullPointerException if {@code batch} is {@code null}.
 * @see EventHubProducerClient#createBatch()
 * @see EventHubProducerClient#createBatch(CreateBatchOptions)
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void send(EventDataBatch batch) {
    producer.send(batch).block();
}

/**
 * {@inheritDoc}
 */
@Override
public void close() {
    // Closing the underlying async producer releases the resources owned by this client.
    producer.close();
}

/**
 * Gets the client identifier.
 *
 * @return The unique identifier string for current client.
 */
public String getIdentifier() {
    return producer.getIdentifier();
}
} |
why the 10 second logic? | private static boolean hasAnyActualMeterRegistry() {
    Instant nowSnapshot = Instant.now();
    // Fast path: reuse the cached answer while it is still fresh; no locking needed because
    // lastDescendantValidation is a volatile reference to an immutable result.
    DescendantValidationResult snapshot = lastDescendantValidation;
    if (nowSnapshot.isBefore(snapshot.getExpiration())) {
        return snapshot.getResult();
    }
    synchronized (lockObject) {
        // Double-checked: another thread may have refreshed the cache while we waited for the lock.
        snapshot = lastDescendantValidation;
        if (nowSnapshot.isBefore(snapshot.getExpiration())) {
            return snapshot.getResult();
        }
        // Re-walk the composite registry tree and cache the answer for 10 seconds. The TTL exists so
        // the (recursive) traversal is not repeated on every recorded operation; add()/remove() also
        // invalidate the cache eagerly. NOTE(review): the 10s value looks like an arbitrary
        // staleness-vs-cost trade-off -- confirm, and consider extracting a named constant.
        DescendantValidationResult newResult = new DescendantValidationResult(
            nowSnapshot.plus(10, ChronoUnit.SECONDS),
            hasAnyActualMeterRegistryCore(compositeRegistry, 1)
        );
        lastDescendantValidation = newResult;
        return newResult.getResult();
    }
} | nowSnapshot.plus(10, ChronoUnit.SECONDS), | private static boolean hasAnyActualMeterRegistry() {
    Instant nowSnapshot = Instant.now();
    // Lock-free fast path on the cached, still-fresh result.
    DescendantValidationResult snapshot = lastDescendantValidation;
    if (nowSnapshot.isBefore(snapshot.getExpiration())) {
        return snapshot.getResult();
    }
    synchronized (lockObject) {
        // Re-check under the lock in case a concurrent caller already refreshed the cache.
        snapshot = lastDescendantValidation;
        if (nowSnapshot.isBefore(snapshot.getExpiration())) {
            return snapshot.getResult();
        }
        // The registry-tree walk is cached for 10 seconds to avoid re-traversal on every operation;
        // add()/remove() invalidate eagerly. NOTE(review): confirm the 10s TTL choice.
        DescendantValidationResult newResult = new DescendantValidationResult(
            nowSnapshot.plus(10, ChronoUnit.SECONDS),
            hasAnyActualMeterRegistryCore(compositeRegistry, 1)
        );
        lastDescendantValidation = newResult;
        return newResult.getResult();
    }
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
// Bridge accessors used to reach internal state of the public-surface types.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
    ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Percent-escapes tag values while keeping "_-/." literal so resource paths stay readable.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
// All meters are registered against this composite; consumer registries attach via add()/remove().
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
// Reference counts per consumer registry so repeated add()/remove() pairs stay balanced.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
// Cached answer of the "is any concrete registry attached?" walk; see hasAnyActualMeterRegistry().
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
// Fixed tag applied to query-plan gateway requests.
private static final Tag QUERYPLAN_TAG = Tag.of(
    TagName.RequestOperationType.toString(),
    ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
// Renders the complete stack trace of the given throwable into a String.
private static String convertStackTraceToString(Throwable throwable)
{
    try (StringWriter stringWriter = new StringWriter();
         PrintWriter printWriter = new PrintWriter(stringWriter)) {
        throwable.printStackTrace(printWriter);
        return stringWriter.toString();
    } catch (IOException ioException) {
        // StringWriter.close() is documented as a no-op, so this path is effectively unreachable.
        throw new IllegalStateException(ioException);
    }
}
/**
 * Creates a new, empty composite registry. When trace logging is enabled, every meter addition is
 * logged together with the call stack that created it (the dummy exception exists only so a stack
 * trace can be captured at the registration site).
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry registry = new CompositeMeterRegistry();
    if (logger.isTraceEnabled()) {
        registry.config().onMeterAdded(
            (meter) -> logger.trace(
                "Meter '{}' added. Callstack: {}",
                meter.getId().getName(),
                convertStackTraceToString(new IllegalStateException("Dummy")))
        );
    }
    return registry;
}
/**
 * Records system-level CPU and memory gauges into the shared composite registry. A no-op while no
 * consumer registry is attached or the CPU/memory meter options have not been configured via add().
 *
 * @param averageSystemCpuUsage average system CPU load, expected as a percentage (capped at 100 by the meter).
 * @param freeMemoryAvailableInMB free memory available, in megabytes.
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
        return;
    }
    if (cpuOptions.isEnabled()) {
        DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
    }
    if (memoryOptions.isEnabled()) {
        // Memory is recorded without percentiles/histograms - only the raw distribution summary.
        DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records operation-level metrics for a completed operation, extracting all required values
 * (status codes, item counts, names, consistency level, RU charge, latency) from the supplied
 * diagnostics context and delegating to the private overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    recordOperation(
        client,
        diagnosticsContext,
        diagnosticsContext.getStatusCode(),
        diagnosticsContext.getSubStatusCode(),
        diagnosticsContext.getMaxItemCount(),
        diagnosticsContext.getActualItemCount(),
        diagnosticsContext.getContainerName(),
        diagnosticsContext.getDatabaseName(),
        diagnosticsContext.getOperationType(),
        diagnosticsContext.isPointOperation(),
        diagnosticsContext.getResourceType(),
        diagnosticsContext.getEffectiveConsistencyLevel(),
        diagnosticsContext.getOperationId(),
        diagnosticsContext.getTotalRequestCharge(),
        diagnosticsContext.getDuration()
    );
}
// Depth-first walk of the composite registry tree; true as soon as any concrete
// (non-composite) registry is found. The depth guard protects against pathological
// nesting/cycles by conservatively assuming a real registry exists.
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        return true;
    }
    for (MeterRegistry child : compositeMeterRegistry.getRegistries()) {
        if (!(child instanceof CompositeMeterRegistry)) {
            // Concrete registry found - no need to look further.
            return true;
        }
        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) child, depth + 1)) {
            return true;
        }
    }
    return false;
}
/**
 * Records operation-level metrics (call count, RU charge, latency, item counts, regions) for a
 * completed operation. Bails out cheaply when no concrete meter registry is attached or client
 * telemetry metrics are disabled, before building any tags.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // NOTE(review): the enabled-check reads "shouldEnableEmptyPageDiagnostics" -- confirm this
    // accessor really doubles as the metrics-enabled flag.
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }
    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);
    // Contacted regions are only collected when the OperationDetails category is enabled.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }
    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );
    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
    // Item counts use -1 as the "not applicable" sentinel.
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1 : actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
/**
 * Creates a completion recorder that publishes RNTBD (direct transport) request metrics for the
 * given endpoint into the shared composite registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {
    return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Registers a consumer {@link MeterRegistry}. Reference-counted: only the first add for a given
 * registry attaches it to the composite; subsequent adds just bump the count.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {
    if (registryRefCount
        .computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0))
        .incrementAndGet() == 1L) {
        ClientTelemetryMetrics
            .compositeRegistry
            .add(registry);
        // NOTE(review): the latest first-time add wins for the static cpu/memory options even if
        // earlier registries passed different options -- confirm this is intended.
        ClientTelemetryMetrics.cpuOptions = cpuOptions;
        ClientTelemetryMetrics.memoryOptions = memoryOptions;
        // Invalidate the cached "any concrete registry attached?" answer immediately.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Unregisters a consumer {@link MeterRegistry}; reference-counted counterpart of {@code add}. Only
 * when the count drops to zero is the registry cleared, closed and detached from the composite; an
 * empty composite is replaced by a fresh one and the descendant-validation cache is invalidated.
 *
 * @param registry the registry to remove; a registry that was never added is ignored.
 */
public static synchronized void remove(MeterRegistry registry) {
    // Robustness fix: the original dereferenced registryRefCount.get(registry) unconditionally and
    // threw a NullPointerException when remove() was called for a registry never passed to add().
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        return;
    }
    if (refCount.decrementAndGet() == 0L) {
        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }
        // Invalidate the cached "any concrete registry attached?" answer immediately.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-encodes {@code value} (keeping alphanumerics and "_-/.") so it is safe to use as a
 * metric tag value.
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set, honoring the configured {@code metricTagNames} selection.
 * Tags that are not selected are omitted entirely.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }
    if (metricTagNames.contains(TagName.Container)) {
        // BUG FIX: '+' binds tighter than '?:' in Java, so the original unparenthesized expression
        // parsed as ((escape(accountTagValue) + "/" + databaseId) != null ? ...). String
        // concatenation is never null, so the condition was always true, escape(databaseId) was
        // called even for null (NPE), and the "/containerId" part was unreachable. Parentheses
        // restore the intended "account/database/container" tag value.
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }
    if (metricTagNames.contains(TagName.Operation)) {
        // Queries (non-point operations) with an operation id get it appended to the tag.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }
    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }
    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }
    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }
    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }
    return Tags.of(effectiveTags);
}
// Drops every tag whose name the meter options ask to suppress; returns the input
// unchanged when nothing is suppressed.
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }
    Set<String> suppressedNames = new HashSet<>();
    for (TagName suppressed : suppressedTags) {
        suppressedNames.add(suppressed.name());
    }
    List<Tag> retained = new ArrayList<>();
    for (Tag candidate : tags) {
        if (suppressedNames.contains(candidate.getKey())) {
            continue;
        }
        retained.add(candidate);
    }
    return Tags.of(retained);
}
private static class OperationMetricProducer {
// Tag and category configuration captured for a single recorded operation.
private final EnumSet<TagName> metricTagNames;
private final EnumSet<MetricCategory> metricCategories;
// Base tags shared by every meter emitted for this operation.
private final Tags operationTags;

/** Creates a producer bound to the given categories, tag names and precomputed operation tags. */
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
    this.metricCategories = metricCategories;
    this.metricTagNames = metricTagNames;
    this.operationTags = operationTags;
}
/**
 * Emits the operation-level meters (call count, RU charge, latency, regions, item counts) and then
 * walks the diagnostics to emit per-request meters (store responses, gateway calls, address
 * resolution, query plans). Micrometer returns the existing meter when the same name+tags is
 * registered again, so re-building the builders per call is by design.
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {
    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }
    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        // Clamp to the meter's configured maximum.
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }
        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }
    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }
    // Per-request metrics: walk every CosmosDiagnostics attached to this operation.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);
        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }
        // Query/feed operations additionally carry query-plan diagnostics.
        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);
        if (feedDiagnostics == null) {
            continue;
        }
        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();
        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Emits gateway request meters (count, latency, timeline) for a query-plan request. A no-op when
 * there are no query-plan diagnostics or the RequestSummary category is disabled. Meters honoring
 * threshold filtering are only emitted when the diagnostics thresholds were violated.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }
    // Query-plan requests get the fixed QueryPlan request-operation tag on top of the operation tags.
    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );
    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }
    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }
    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Records request and response payload sizes (in bytes) for a single request, honoring the
 * per-meter enablement and diagnostic-threshold filtering.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
    if (reqSizeOptions.isEnabled() &&
        (!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        DistributionSummary requestPayloadSizeMeter = DistributionSummary
            .builder(reqSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Request payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, reqSizeOptions))
            .register(compositeRegistry);
        requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
    }
    CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
    if (rspSizeOptions.isEnabled() &&
        (!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        DistributionSummary responsePayloadSizeMeter = DistributionSummary
            .builder(rspSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Response payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, rspSizeOptions))
            .register(compositeRegistry);
        responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
    }
}
/**
 * Records the requested max item count and the actual item count for a feed/query operation.
 * Skipped entirely when maxItemCount is not positive (point operations use the -1 sentinel) or the
 * OperationDetails category is disabled. Recorded values are clamped to [0, 100000].
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
        if (maxItemCountOptions.isEnabled()) {
            DistributionSummary maxItemCountMeter = DistributionSummary
                .builder(maxItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Request max. item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, maxItemCountOptions))
                .register(compositeRegistry);
            maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
        }
        CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
        if (actualItemCountOptions.isEnabled()) {
            DistributionSummary actualItemCountMeter = DistributionSummary
                .builder(actualItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Response actual item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, actualItemCountOptions))
                .register(compositeRegistry);
            actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
        }
    }
}
/**
 * Builds the request-level tag set (partition key range, status codes, operation type, region,
 * service endpoint/address, partition/replica ids), including only the tags selected in
 * {@code metricTagNames}. Missing values are tagged as "NONE".
 */
private Tags createRequestTags(
    EnumSet<TagName> metricTagNames,
    String pkRangeId,
    int statusCode,
    int subStatusCode,
    String resourceType,
    String operationType,
    String regionName,
    String serviceEndpoint,
    String serviceAddress
) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
        effectiveTags.add(Tag.of(
            TagName.PartitionKeyRangeId.toString(),
            Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
    }
    if (metricTagNames.contains(TagName.RequestStatusCode)) {
        effectiveTags.add(Tag.of(
            TagName.RequestStatusCode.toString(),
            statusCode + "/" + subStatusCode));
    }
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        effectiveTags.add(Tag.of(
            TagName.RequestOperationType.toString(),
            resourceType + "/" + operationType));
    }
    if (metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            regionName != null ? regionName : "NONE"));
    }
    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceEndpoint.toString(),
            serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
    }
    String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
    if (metricTagNames.contains(TagName.ServiceAddress)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceAddress.toString(),
            effectiveServiceAddress));
    }
    boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
    boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
    if (containsPartitionId || containsReplicaId) {
        // Partition and replica ids are parsed out of the (escaped) service address; parsing is
        // only attempted when at least one of the two tags is requested.
        String partitionId = "NONE";
        String replicaId = "NONE";
        String[] partitionAndReplicaId =
            StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
        if (partitionAndReplicaId.length == 2) {
            partitionId = partitionAndReplicaId[0];
            replicaId = partitionAndReplicaId[1];
        }
        if (containsPartitionId) {
            effectiveTags.add(Tag.of(
                TagName.PartitionId.toString(),
                partitionId));
        }
        if (containsReplicaId) {
            effectiveTags.add(Tag.of(
                TagName.ReplicaId.toString(),
                replicaId));
        }
    }
    return Tags.of(effectiveTags);
}
// Query-plan requests carry a single fixed RequestOperationType tag, and only when
// that tag name is enabled; otherwise the tag set is empty.
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    List<Tag> queryPlanTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        queryPlanTags.add(QUERYPLAN_TAG);
    }
    return Tags.of(queryPlanTags);
}
/**
 * Builds the tag set for address-resolution meters (service endpoint plus the two force-refresh
 * flags), including only the tags selected in {@code metricTagNames}.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceEndpoint.toString(),
            serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
    }
    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        effectiveTags.add(Tag.of(
            TagName.IsForceRefresh.toString(),
            isForceRefresh ? "True" : "False"));
    }
    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        effectiveTags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }
    return Tags.of(effectiveTags);
}
/**
 * Records legacy direct-transport endpoint statistics (acquired/available channels, inflight
 * requests). A no-op unless the Legacy metric category is enabled and statistics are present.
 *
 * NOTE(review): the three .baseUnit(" string literals below are truncated in this copy of the
 * source (the file appears to have been mangled during extraction) -- restore the unit strings
 * from the upstream repository before compiling.
 */
private void recordRntbdEndpointStatistics(
    CosmosAsyncClient client,
    RntbdEndpointStatistics endpointStatistics,
    Tags requestTags) {
    if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
        return;
    }
    CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
    if (acquiredOptions.isEnabled()) {
        DistributionSummary acquiredChannelsMeter = DistributionSummary
            .builder(acquiredOptions.getMeterName().toString())
            .baseUnit("
            .description("Endpoint statistics(acquired channels)")
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(requestTags, acquiredOptions))
            .register(compositeRegistry);
        acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
    }
    CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
    if (availableOptions.isEnabled()) {
        DistributionSummary availableChannelsMeter = DistributionSummary
            .builder(availableOptions.getMeterName().toString())
            .baseUnit("
            .description("Endpoint statistics(available channels)")
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(requestTags, availableOptions))
            .register(compositeRegistry);
        availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
    }
    CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
    if (inflightOptions.isEnabled()) {
        DistributionSummary inflightRequestsMeter = DistributionSummary
            .builder(inflightOptions.getMeterName().toString())
            .baseUnit("
            .description("Endpoint statistics(inflight requests)")
            .tags(getEffectiveTags(requestTags, inflightOptions))
            .maximumExpectedValue(1_000_000d)
            .publishPercentiles(inflightOptions.getPercentiles())
            .publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
    }
}
/**
 * Records one latency {@code Timer} per event of the given request timeline.
 * <p>
 * Emission requires the {@code RequestDetails} metric category, an enabled meter for
 * {@code name}, and — when threshold filtering is on — a violated diagnostic threshold.
 * Events with a null or zero duration are skipped.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {

    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }

    CosmosMeterOptions options = clientAccessor.getMeterOptions(client, name);

    boolean suppressedByThresholds =
        options.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated();
    if (!options.isEnabled() || suppressedByThresholds) {
        return;
    }

    for (RequestTimeline.Event timelineEvent : requestTimeline) {
        Duration elapsed = timelineEvent.getDuration();
        // Null/zero durations carry no signal — skip them.
        if (elapsed == null || elapsed == Duration.ZERO) {
            continue;
        }

        // Meter name is suffixed with the (escaped) event name so each phase gets its own series.
        Timer eventTimer = Timer
            .builder(options.getMeterName().toString() + "." + escape(timelineEvent.getName()))
            .description("Request timeline (" + timelineEvent.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(options.getPercentiles())
            .publishPercentileHistogram(options.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, options))
            .register(compositeRegistry);
        eventTimer.record(elapsed);
    }
}
/**
 * Records per-request (RNTBD/direct-mode) summary metrics — backend latency, RU charge,
 * request latency and request count — for every store response in the given collection,
 * and fans out to timeline, payload-size and endpoint-statistics recording.
 * <p>
 * No-op unless the {@code RequestSummary} metric category is enabled. Each meter is
 * additionally gated on its own {@code CosmosMeterOptions} and, when threshold
 * filtering is enabled for it, on the diagnostics context reporting a threshold violation.
 */
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {

    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();

        // Request-level tags (status/sub-status, resource/operation type, region,
        // service address) layered on top of the operation-level tags.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );

        // Backend latency is only available for some responses — record it when present.
        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
            }
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamp to the meter's configured maximum expected value.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        // Timeline details are gated on their own (more verbose) category.
        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }

        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );

        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
/**
 * Records gateway-mode request summary metrics (request count, RU charge, latency) for
 * each entry of the gateway statistics list, plus the gateway request timeline.
 * <p>
 * No-op when the list is null/empty or the {@code RequestSummary} category is disabled.
 * Direct-mode-only tags (region, service address/endpoint, partition/replica id) are
 * stripped from the tag set because they do not apply to gateway calls.
 *
 * @param latency the client-observed duration applied to every entry in the list;
 *                may be null, in which case no latency is recorded
 */
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {

    if (gatewayStatisticsList == null
        || gatewayStatisticsList.size() == 0
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Work on a clone — never mutate the shared metricTagNames set.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);

    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        // Region/address arguments are null — those tags were removed above.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,
                null,
                null)
        );

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamp to the meter's configured maximum expected value.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
/**
 * Records address-resolution latency and request-count metrics for every completed
 * resolution in the given map.
 * <p>
 * No-op when the map is null/empty or the {@code AddressResolutions} category is
 * disabled. In-flight resolutions (or those without an end timestamp) are skipped
 * because their latency cannot be computed yet.
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {

    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.size() == 0
        || !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
        return;
    }

    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
            : addressResolutionStatisticsMap.values()) {

        // Only completed resolutions have a meaningful duration.
        if (addressResolutionStatistics.isInflightRequest() ||
            addressResolutionStatistics.getEndTimeUTC() == null) {
            continue;
        }

        Tags addressResolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                addressResolutionStatistics.getTargetEndpoint(),
                addressResolutionStatistics.isForceRefresh(),
                addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
            )
        );

        Duration latency = Duration.between(
            addressResolutionStatistics.getStartTimeUTC(),
            addressResolutionStatistics.getEndTimeUTC());

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(addressResolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(latency);
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(addressResolutionTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
    }
}
}
/**
 * {@link RntbdMetricsCompletionRecorder} implementation that publishes RNTBD
 * (direct-mode transport) metrics to a Micrometer registry.
 * <p>
 * Gauges/counters for endpoint, channel and queue state are registered once in the
 * constructor (gated on the client's enabled metric categories); per-request meters
 * are emitted from {@link #markComplete(RntbdRequestRecord)}.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    // Endpoint-scoped tags applied to all meters emitted by this recorder.
    private final Tags tags;
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        // Request-level gauges: concurrent and queued request counts per endpoint.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                     .description("RNTBD concurrent requests (executing or queued request count)")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                     .description("RNTBD queued request count")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }
        }

        // Endpoint-level meters: total endpoint count and eviction count (client-wide,
        // hence no endpoint tags).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                     .description("RNTBD endpoint count")
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                        options.getMeterName().toString(),
                        client,
                        RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }

        // Channel-level meters backed by the endpoint's durable metrics (which survive
        // endpoint re-creation).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                        options.getMeterName().toString(),
                        endpoint.durableEndpointMetrics(),
                        RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                        options.getMeterName().toString(),
                        endpoint.durableEndpointMetrics(),
                        RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                        options.getMeterName().toString(),
                        endpoint.durableEndpointMetrics(),
                        RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
    }

    /**
     * Stops the request record's timers and records request/response payload sizes.
     * When the {@code DirectRequests} category is disabled, only stops the record
     * without emitting any meters.
     */
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            // Timers stay null when their meter is disabled; requestRecord.stop(...)
            // receives whatever subset is enabled.
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;

            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            // Route the latency into either the failed or the success timer depending on outcome.
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                requestSize.record(requestRecord.requestLength());
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            requestRecord.stop();
        }
    }
}
/**
 * Immutable pair of a registry-validation outcome and the instant marking when
 * that cached outcome was produced/expires.
 */
static class DescendantValidationResult {
    // Timestamp associated with this cached validation outcome.
    private final Instant validationExpiration;
    // The cached validation verdict.
    private final boolean validationOutcome;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.validationExpiration = expiration;
        this.validationOutcome = result;
    }

    /** Returns the instant associated with this cached result. */
    public Instant getExpiration() {
        return validationExpiration;
    }

    /** Returns the cached validation verdict. */
    public boolean getResult() {
        return validationOutcome;
    }
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);

// Bridge accessors for package-private state of CosmosAsyncClient / CosmosDiagnostics.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
    ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();

// Escaper used for tag/meter-name segments; keeps '_', '-', '/', '.' unescaped.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);

// Composite registry all meters are registered against; replaced with a fresh
// instance when the last downstream registry is removed (see remove(...)).
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();

// Reference count per downstream registry so add/remove pairs are balanced.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();

// Options for the system-wide CPU/memory meters; set by add(...).
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;

// Cached result of the descendant-registry validation; reset by add(...)/remove(...).
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();

// Tag identifying query-plan gateway calls (DocumentCollection/QueryPlan).
private static final Tag QUERYPLAN_TAG = Tag.of(
    TagName.RequestOperationType.toString(),
    ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders the full stack trace of the given throwable as a string.
 * Used only for trace-level diagnostics of meter registration.
 */
private static String convertStackTraceToString(Throwable throwable) {
    StringWriter stackTraceWriter = new StringWriter();
    // StringWriter.close() is a no-op, so only the PrintWriter needs managing here.
    try (PrintWriter printWriter = new PrintWriter(stackTraceWriter)) {
        throwable.printStackTrace(printWriter);
    }
    return stackTraceWriter.toString();
}
/**
 * Creates a new composite registry. When trace logging is enabled, every meter
 * registration is logged together with the call stack that triggered it.
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();

    if (!logger.isTraceEnabled()) {
        return freshRegistry;
    }

    // Trace aid: the dummy exception exists solely to capture the current call stack.
    freshRegistry.config().onMeterAdded(meter -> logger.trace(
        "Meter '{}' added. Callstack: {}",
        meter.getId().getName(),
        convertStackTraceToString(new IllegalStateException("Dummy"))));

    return freshRegistry;
}
/**
 * Records system-wide CPU-load and free-memory distribution summaries.
 * <p>
 * No-op until at least one downstream registry has been added and both meter
 * options have been initialized via {@code add(...)}.
 *
 * @param averageSystemCpuUsage   average system CPU load in percent
 * @param freeMemoryAvailableInMB free memory in megabytes
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        // Free memory is reported without percentiles/histograms.
        DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records all operation-level metrics for a completed operation by unpacking the
 * relevant fields from the diagnostics context and delegating to the private overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    // Local alias keeps the argument list below readable.
    CosmosDiagnosticsContext ctx = diagnosticsContext;

    recordOperation(
        client,
        ctx,
        ctx.getStatusCode(),
        ctx.getSubStatusCode(),
        ctx.getMaxItemCount(),
        ctx.getActualItemCount(),
        ctx.getContainerName(),
        ctx.getDatabaseName(),
        ctx.getOperationType(),
        ctx.isPointOperation(),
        ctx.getResourceType(),
        ctx.getEffectiveConsistencyLevel(),
        ctx.getOperationId(),
        ctx.getTotalRequestCharge(),
        ctx.getDuration()
    );
}
/**
 * Returns true when the composite registry (transitively) contains at least one
 * non-composite registry. Depth is capped at 100 as a defensive cut-off against
 * pathological/cyclic nesting (treated as "has a registry").
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        return true;
    }

    for (MeterRegistry childRegistry : compositeMeterRegistry.getRegistries()) {
        // Any non-composite child is an actual registry.
        if (!(childRegistry instanceof CompositeMeterRegistry)) {
            return true;
        }

        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) childRegistry, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Core operation-metrics entry point: builds the operation-level tag set and
 * delegates per-meter recording to an {@code OperationMetricProducer}.
 * <p>
 * No-op when no actual (non-composite) registry is attached or client telemetry
 * metrics are disabled for this client.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }

    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);

    // Contacted regions are only looked up when the OperationDetails category is on.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }

    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );

    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);

    // -1 marks "not applicable" for item counts the caller did not provide.
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1: actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
/**
 * Creates a per-endpoint RNTBD metrics completion recorder that publishes into the
 * shared composite registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {

    return new RntbdMetricsV2(ClientTelemetryMetrics.compositeRegistry, client, endpoint);
}
/**
 * Attaches a downstream registry (ref-counted). Only the first add for a given
 * registry wires it into the composite registry, captures the CPU/memory meter
 * options, and invalidates the cached descendant-validation result.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    AtomicLong refCount = registryRefCount
        .computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0));

    if (refCount.incrementAndGet() != 1L) {
        // Already attached — just bump the reference count.
        return;
    }

    ClientTelemetryMetrics.compositeRegistry.add(registry);
    ClientTelemetryMetrics.cpuOptions = cpuOptions;
    ClientTelemetryMetrics.memoryOptions = memoryOptions;

    // Force re-validation of the registry tree on next use.
    lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
/**
 * Detaches a downstream registry (ref-counted). Only when the last reference is
 * released is the registry cleared, closed and removed from the composite registry;
 * a fresh composite registry is created once no registries remain.
 * <p>
 * Fix: the previous implementation called {@code registryRefCount.get(registry)}
 * unconditionally and threw a {@code NullPointerException} when {@code remove} was
 * invoked for a registry that was never added (or already fully removed). It also
 * left the zero-count entry in the map forever, leaking the registry key.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // Unbalanced remove() — nothing to do; previously this was an NPE.
        logger.warn("Attempting to remove a MeterRegistry that was never added - ignored.");
        return;
    }

    if (refCount.decrementAndGet() == 0L) {
        // Drop the exhausted entry so the registry key does not leak; a subsequent
        // add() will simply create a fresh counter.
        registryRefCount.remove(registry);

        registry.clear();
        registry.close();

        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);

        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }

        // Force re-validation of the registry tree on next use.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes the given value for safe use in meter names and tag values
 * ('_', '-', '/', '.' are left unescaped).
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set, including only the tags enabled in
 * {@code metricTagNames}.
 * <p>
 * Fix: the Container tag value was computed without parentheses around the
 * conditional expressions. Because '+' binds tighter than '?:' in Java, the whole
 * concatenation {@code escape(accountTagValue) + "/" + databaseId} became the
 * ternary condition (always non-null, hence always true), so the tag value
 * degenerated to {@code escape(databaseId)} — and threw a NullPointerException
 * when {@code databaseId} was null. The parenthesized form below produces the
 * intended {@code account/database/container} value with "NONE" placeholders.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {

    List<Tag> effectiveTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }

    if (metricTagNames.contains(TagName.Container)) {
        // Parentheses are essential: '+' binds tighter than '?:'.
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }

    if (metricTagNames.contains(TagName.Operation)) {
        // Queries and other non-point operations with an operation id get it appended.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }

    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }

    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }

    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }

    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {

        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }

    return Tags.of(effectiveTags);
}
/**
 * Returns {@code tags} with every tag whose key matches a suppressed {@code TagName}
 * removed. When no suppression is configured, the input is returned unchanged.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressed = meterOptions.getSuppressedTagNames();
    if (suppressed == null || suppressed.isEmpty()) {
        return tags;
    }

    // Materialize the suppressed tag keys once for O(1) lookups below.
    HashSet<String> suppressedKeys = new HashSet<>();
    for (TagName suppressedTagName : suppressed) {
        suppressedKeys.add(suppressedTagName.name());
    }

    List<Tag> keptTags = new ArrayList<>();
    for (Tag candidate : tags) {
        if (suppressedKeys.contains(candidate.getKey())) {
            continue;
        }
        keptTags.add(candidate);
    }

    return Tags.of(keptTags);
}
private static class OperationMetricProducer {
// Tag names the client opted into — controls which request/operation tags are emitted.
private final EnumSet<TagName> metricTagNames;
// Metric categories enabled for the client — gates whole groups of meters.
private final EnumSet<MetricCategory> metricCategories;
// Pre-built operation-level tags shared by every meter this producer emits.
private final Tags operationTags;

/**
 * Captures the per-operation metric configuration used by all record* methods.
 */
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
    this.metricCategories = metricCategories;
    this.metricTagNames = metricTagNames;
    this.operationTags = operationTags;
}
/**
 * Records all operation-level meters (call count, RU charge, regions contacted,
 * item counts, latency) and then walks every {@code CosmosDiagnostics} of the
 * context to record request-level store/gateway/address-resolution and
 * query-plan metrics.
 *
 * @param maxItemCount    requested page size, or -1 when not applicable
 * @param actualItemCount returned item count, or -1 when not applicable
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {

    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }

    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        // Clamp to the meter's configured maximum expected value.
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }

    // Regions-contacted and item-count meters belong to the OperationDetails category.
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }

        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }

    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }

    // Per-diagnostics drill-down: store responses (incl. supplemental), gateway calls,
    // address resolutions, and — for feed responses — the query plan.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }

        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);

        if (feedDiagnostics == null) {
            continue;
        }

        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();

        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Records gateway request/latency meters and the request timeline for the query-plan
 * call of a query operation.
 * <p>
 * No-op when there are no query-plan diagnostics or the {@code RequestSummary}
 * category is disabled.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Query-plan-specific request tags on top of the operation tags.
    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );

    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }

    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }

    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
// Records the request and response payload sizes (bytes) for a single request.
// Both meters are plain distribution summaries - no percentiles or histograms.
//
// ctx                        - diagnostics context, used for threshold-based filtering
// client                     - client whose CosmosMeterOptions control the emitted meters
// requestPayloadSizeInBytes  - size of the request body
// responsePayloadSizeInBytes - size of the response body
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    recordPayloadSize(
        ctx,
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST,
        "Request payload size in bytes",
        requestPayloadSizeInBytes);

    recordPayloadSize(
        ctx,
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE,
        "Response payload size in bytes",
        responsePayloadSizeInBytes);
}

// Registers (or finds) the payload-size distribution summary for the given meter
// name and records one sample, honoring the meter's enablement and
// threshold-filtering configuration.
private void recordPayloadSize(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName meterName,
    String description,
    int payloadSizeInBytes
) {
    CosmosMeterOptions sizeOptions = clientAccessor.getMeterOptions(client, meterName);

    if (!sizeOptions.isEnabled()) {
        return;
    }

    if (sizeOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated()) {
        return;
    }

    DistributionSummary payloadSizeMeter = DistributionSummary
        .builder(sizeOptions.getMeterName().toString())
        .baseUnit("bytes")
        .description(description)
        .maximumExpectedValue(16d * 1024)
        .publishPercentiles()
        .publishPercentileHistogram(false)
        .tags(getEffectiveTags(operationTags, sizeOptions))
        .register(compositeRegistry);

    payloadSizeMeter.record(payloadSizeInBytes);
}
// Records the requested max. item count and the actual item count returned for a
// query/feed operation. Values are clamped to [0, 100000]. No-op for point
// operations (maxItemCount <= 0) or when OperationDetails metrics are disabled.
//
// client          - client whose CosmosMeterOptions control the emitted meters
// maxItemCount    - requested page size; <= 0 means "not a feed operation"
// actualItemCount - number of items actually returned
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount <= 0 || !this.metricCategories.contains(MetricCategory.OperationDetails)) {
        return;
    }

    CosmosMeterOptions requestedCountOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
    if (requestedCountOptions.isEnabled()) {
        DistributionSummary requestedCountMeter = DistributionSummary
            .builder(requestedCountOptions.getMeterName().toString())
            .baseUnit("item count")
            .description("Request max. item count")
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, requestedCountOptions))
            .register(compositeRegistry);

        // Clamp to the meter's expected range before recording.
        requestedCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
    }

    CosmosMeterOptions returnedCountOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
    if (returnedCountOptions.isEnabled()) {
        DistributionSummary returnedCountMeter = DistributionSummary
            .builder(returnedCountOptions.getMeterName().toString())
            .baseUnit("item count")
            .description("Response actual item count")
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, returnedCountOptions))
            .register(compositeRegistry);

        requestedOrZero(returnedCountMeter, actualItemCount);
    }
}

// Records the clamped item count into the given meter.
private static void requestedOrZero(DistributionSummary meter, int itemCount) {
    meter.record(Math.max(0, Math.min(itemCount, 100_000d)));
}
// Builds the per-request tag set (partition key range id, status/sub-status code,
// resource/operation type, region, service endpoint/address, partition/replica id),
// including only the dimensions enabled in metricTagNames. Missing values are
// normalized to "NONE" to keep tag cardinality well-defined.
private Tags createRequestTags(
EnumSet<TagName> metricTagNames,
String pkRangeId,
int statusCode,
int subStatusCode,
String resourceType,
String operationType,
String regionName,
String serviceEndpoint,
String serviceAddress
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
effectiveTags.add(Tag.of(
TagName.PartitionKeyRangeId.toString(),
Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
}
// Status and sub-status are combined into a single "status/subStatus" tag value.
if (metricTagNames.contains(TagName.RequestStatusCode)) {
effectiveTags.add(Tag.of(
TagName.RequestStatusCode.toString(),
statusCode + "/" + subStatusCode));
}
// Resource and operation type are combined into a single "resource/operation" tag value.
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(Tag.of(
TagName.RequestOperationType.toString(),
resourceType + "/" + operationType));
}
if (metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
regionName != null ? regionName : "NONE"));
}
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
if (metricTagNames.contains(TagName.ServiceAddress)) {
effectiveTags.add(Tag.of(
TagName.ServiceAddress.toString(),
effectiveServiceAddress));
}
// Partition and replica id are both parsed from the (escaped) service address;
// only do the parse when at least one of the two tags is requested.
boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
if (containsPartitionId || containsReplicaId) {
String partitionId = "NONE";
String replicaId = "NONE";
String[] partitionAndReplicaId =
StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
if (partitionAndReplicaId.length == 2) {
partitionId = partitionAndReplicaId[0];
replicaId = partitionAndReplicaId[1];
}
if (containsPartitionId) {
effectiveTags.add(Tag.of(
TagName.PartitionId.toString(),
partitionId));
}
if (containsReplicaId) {
effectiveTags.add(Tag.of(
TagName.ReplicaId.toString(),
replicaId));
}
}
return Tags.of(effectiveTags);
}
// Builds the tag set for query-plan gateway calls: a single synthetic
// DocumentCollection/QueryPlan operation-type tag when that dimension is enabled,
// otherwise an empty tag set.
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        return Tags.of(QUERYPLAN_TAG);
    }

    return Tags.empty();
}
// Builds the tag set for address-resolution metrics (target endpoint and the two
// force-refresh flags), including only dimensions enabled in metricTagNames.
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> tags = new ArrayList<>(3);

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = (serviceEndpoint != null) ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        String forceRefreshValue = isForceRefresh ? "True" : "False";
        tags.add(Tag.of(TagName.IsForceRefresh.toString(), forceRefreshValue));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        String forceMapRefreshValue = isForceCollectionRoutingMapRefresh ? "True" : "False";
        tags.add(Tag.of(TagName.IsForceCollectionRoutingMapRefresh.toString(), forceMapRefreshValue));
    }

    return Tags.of(tags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
// Records one Timer sample per non-empty event in the request timeline; the
// meter name is "<configuredName>.<eventName>". No-op when the timeline is
// absent, the RequestDetails category is disabled, the meter is disabled, or
// threshold filtering suppresses it.
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {
    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }

    CosmosMeterOptions options = clientAccessor.getMeterOptions(client, name);
    boolean suppressedByThreshold =
        options.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated();
    if (!options.isEnabled() || suppressedByThreshold) {
        return;
    }

    for (RequestTimeline.Event timelineEvent : requestTimeline) {
        Duration eventDuration = timelineEvent.getDuration();
        // Skip events without a measured duration.
        if (eventDuration == null || eventDuration == Duration.ZERO) {
            continue;
        }

        String eventName = timelineEvent.getName();
        Timer eventMeter = Timer
            .builder(options.getMeterName().toString() + "." + escape(eventName))
            .description("Request timeline (" + eventName + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(options.getPercentiles())
            .publishPercentileHistogram(options.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, options))
            .register(compositeRegistry);
        eventMeter.record(eventDuration);
    }
}
// Records per-request metrics for direct-mode (RNTBD) store responses: backend
// latency, RU charge, end-to-end request latency, request count, request
// timeline, payload sizes and legacy endpoint statistics.
// No-op unless the RequestSummary metric category is enabled.
private void recordStoreResponseStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
// Per-request tags: pk range, status codes, resource/operation type, region,
// service endpoint/address of the replica that served the request.
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNames,
storeResponseDiagnostics.getPartitionKeyRangeId(),
storeResponseDiagnostics.getStatusCode(),
storeResponseDiagnostics.getSubStatusCode(),
responseStatistics.getRequestResourceType().toString(),
responseStatistics.getRequestOperationType().toString(),
responseStatistics.getRegionName(),
storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
);
// Backend (service-side) processing latency - only recorded when reported.
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
if (beLatencyOptions.isEnabled() &&
(!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary backendRequestLatencyMeter = DistributionSummary
.builder(beLatencyOptions.getMeterName().toString())
.baseUnit("ms")
.description("Backend service latency")
.maximumExpectedValue(6_000d)
.publishPercentiles(beLatencyOptions.getPercentiles())
.publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, beLatencyOptions))
.register(compositeRegistry);
backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
}
}
// RU charge of the individual RNTBD request (clamped to 100k).
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = storeResponseDiagnostics.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("RNTBD Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
// Client-observed end-to-end latency of the RNTBD request.
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Duration latency = responseStatistics.getDuration();
if (latency != null) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("RNTBD Request latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
// Total RNTBD request counter.
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("RNTBD requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
// Fine-grained per-stage timeline - gated on the RequestDetails category.
if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
storeResponseDiagnostics.getRequestTimeline(), requestTags);
}
recordRequestPayloadSizes(
ctx,
client,
storeResponseDiagnostics.getRequestPayloadLength(),
storeResponseDiagnostics.getResponsePayloadLength()
);
recordRntbdEndpointStatistics(
client,
storeResponseDiagnostics.getRntbdEndpointStatistics(),
requestTags);
}
}
// Records per-request metrics for gateway-mode requests: request count, RU
// charge, latency and request timeline.
// No-op when there are no gateway statistics or RequestSummary is disabled.
//
// latency - client-observed operation latency shared across all gateway
//           statistics entries; may be null.
private void recordGatewayStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Duration latency,
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
if (gatewayStatisticsList == null
|| gatewayStatisticsList.size() == 0
|| !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
// Gateway statistics carry no region/endpoint/replica information, so those
// tag dimensions are stripped before building the per-request tags.
EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
metricTagNamesForGateway.remove(TagName.RegionName);
metricTagNamesForGateway.remove(TagName.ServiceAddress);
metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
metricTagNamesForGateway.remove(TagName.PartitionId);
metricTagNamesForGateway.remove(TagName.ReplicaId);
for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNamesForGateway,
gatewayStats.getPartitionKeyRangeId(),
gatewayStats.getStatusCode(),
gatewayStats.getSubStatusCode(),
gatewayStats.getResourceType().toString(),
gatewayStats.getOperationType().toString(),
null,
null,
null)
);
// Gateway request counter.
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
// RU charge of the gateway request (clamped to 100k).
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = gatewayStats.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Gateway Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
// NOTE: the same operation-level latency is recorded once per gateway
// statistics entry here.
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
// Per-stage timeline; recordRequestTimeline is gated on RequestDetails.
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
gatewayStats.getRequestTimeline(), requestTags);
}
}
// Records latency and request count for completed replica-address resolution
// calls. In-flight or unfinished entries are skipped. No-op when the map is
// empty/null or the AddressResolutions metric category is disabled.
private void recordAddressResolutionStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {
if (addressResolutionStatisticsMap == null
|| addressResolutionStatisticsMap.size() == 0
|| !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
return;
}
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
: addressResolutionStatisticsMap.values()) {
// Only completed resolutions have a meaningful duration.
if (addressResolutionStatistics.isInflightRequest() ||
addressResolutionStatistics.getEndTimeUTC() == null) {
continue;
}
Tags addressResolutionTags = operationTags.and(
createAddressResolutionTags(
metricTagNames,
addressResolutionStatistics.getTargetEndpoint(),
addressResolutionStatistics.isForceRefresh(),
addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
)
);
Duration latency = Duration.between(
addressResolutionStatistics.getStartTimeUTC(),
addressResolutionStatistics.getEndTimeUTC());
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer addressResolutionLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Address resolution latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(addressResolutionTags, latencyOptions))
.register(compositeRegistry);
addressResolutionLatencyMeter.record(latency);
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Address resolution requests")
.tags(getEffectiveTags(addressResolutionTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
}
}
}
// Records RNTBD (direct-mode TCP) transport metrics for a single endpoint.
// The constructor registers long-lived gauges/function-counters (concurrent and
// queued requests, endpoint counts, channel counts); markComplete records the
// per-request meters (latency, success/failure latency, payload sizes) when a
// request finishes.
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
private final RntbdTransportClient client;
// Endpoint-specific tags used for all meters registered by this instance.
private final Tags tags;
private final MeterRegistry registry;
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
this.tags = Tags.of(endpoint.clientMetricTag());
this.client = client;
this.registry = registry;
// Gauges observing the endpoint's live request counters.
if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
.description("RNTBD concurrent requests (executing or queued request count)")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
.description("RNTBD queued request count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
// Client-wide endpoint count/eviction meters (not tagged per endpoint).
if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
.description("RNTBD endpoint count")
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
client,
RntbdTransportClient::endpointEvictionCount)
.description("RNTBD endpoint eviction count")
.register(registry);
}
}
// Channel lifecycle meters observing the endpoint's durable metrics so that
// values survive endpoint re-creation.
if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint.durableEndpointMetrics(),
RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
.description("RNTBD acquired channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint.durableEndpointMetrics(),
RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
.description("RNTBD closed channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
if (options.isEnabled()) {
Gauge.builder(
options.getMeterName().toString(),
endpoint.durableEndpointMetrics(),
RntbdDurableEndpointMetrics::channelsAvailableMetric)
.description("RNTBD available channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
}
// Implements RntbdMetricsCompletionRecorder: called when an RNTBD request
// record completes (successfully or not).
public void markComplete(RntbdRequestRecord requestRecord) {
if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
Timer requests = null;
Timer requestsSuccess = null;
Timer requestsFailed = null;
CosmosMeterOptions options = this.client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
if (options.isEnabled()) {
requests = Timer
.builder(options.getMeterName().toString())
.description("RNTBD request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(this.tags, options))
.register(this.registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
if (options.isEnabled()) {
requestsFailed = Timer
.builder(options.getMeterName().toString())
.description("RNTBD failed request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
if (options.isEnabled()) {
requestsSuccess = Timer
.builder(options.getMeterName().toString())
.description("RNTBD successful request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(tags, options))
.register(registry);
}
// stop() records into the overall timer and into whichever of the
// success/failure timers applies (null timers are presumably skipped by
// stop() - confirm against RntbdRequestRecord).
requestRecord.stop(
requests,
requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
if (options.isEnabled()) {
DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD request size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
requestSize.record(requestRecord.requestLength());
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
if (options.isEnabled()) {
DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD response size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
responseSize.record(requestRecord.responseLength());
}
} else {
// Metrics disabled - still stop the record so the request completes.
requestRecord.stop();
}
}
}
// Immutable cache entry for the composite-registry scan: whether at least one
// concrete registry was found, plus the instant at which this answer expires.
static class DescendantValidationResult {
    // Point in time after which this cached answer must be recomputed.
    private final Instant validUntil;
    // Cached outcome of the registry-tree scan.
    private final boolean scanOutcome;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.validUntil = expiration;
        this.scanOutcome = result;
    }

    public Instant getExpiration() {
        return this.validUntil;
    }

    public boolean getResult() {
        return this.scanOutcome;
    }
}
} |
why the need for this recursive function? Mainly concerned about the memory stack that this will create and specially when it is being called inside synchronized block. | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | hasAnyActualMeterRegistryCore(compositeRegistry, 1) | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
private static final Tag QUERYPLAN_TAG = Tag.of(
TagName.RequestOperationType.toString(),
ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
private static String convertStackTraceToString(Throwable throwable)
{
try (StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw))
{
throwable.printStackTrace(pw);
return sw.toString();
}
catch (IOException ioe)
{
throw new IllegalStateException(ioe);
}
}
// Creates a new, empty composite registry. When TRACE logging is enabled, every
// meter registration is logged together with the callstack that triggered it
// (useful for tracking down unexpected meter creation).
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();

    if (logger.isTraceEnabled()) {
        freshRegistry.config().onMeterAdded(meter -> logger.trace(
            "Meter '{}' added. Callstack: {}",
            meter.getId().getName(),
            convertStackTraceToString(new IllegalStateException("Dummy"))));
    }

    return freshRegistry;
}
// Records system-wide CPU usage (%) and free memory (MB) distribution summaries.
// No-op until at least one registry and the cpu/memory meter options have been
// configured via add().
// NOTE(review): cpuOptions/memoryOptions are non-volatile mutable statics read
// here without synchronization - presumably benign, but worth confirming.
public static void recordSystemUsage(
float averageSystemCpuUsage,
float freeMemoryAvailableInMB
) {
if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
return;
}
if (cpuOptions.isEnabled()) {
DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
.builder(CosmosMetricName.SYSTEM_CPU.toString())
.baseUnit("%")
.description("Avg. System CPU load")
.maximumExpectedValue(100d)
.publishPercentiles(cpuOptions.getPercentiles())
.publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
}
if (memoryOptions.isEnabled()) {
DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
.builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
.baseUnit("MB")
.description("Free memory available")
.publishPercentiles()
.publishPercentileHistogram(false)
.register(compositeRegistry);
freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
}
}
// Convenience overload: expands the diagnostics context into the individual
// dimensions expected by the private recordOperation overload.
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    // Getters are evaluated in the same order the original argument list used.
    int statusCode = diagnosticsContext.getStatusCode();
    int subStatusCode = diagnosticsContext.getSubStatusCode();
    Integer maxItemCount = diagnosticsContext.getMaxItemCount();
    Integer actualItemCount = diagnosticsContext.getActualItemCount();
    String containerName = diagnosticsContext.getContainerName();
    String databaseName = diagnosticsContext.getDatabaseName();
    String operationType = diagnosticsContext.getOperationType();
    boolean isPointOperation = diagnosticsContext.isPointOperation();
    String resourceType = diagnosticsContext.getResourceType();
    ConsistencyLevel effectiveConsistencyLevel = diagnosticsContext.getEffectiveConsistencyLevel();
    String operationId = diagnosticsContext.getOperationId();
    float totalRequestCharge = diagnosticsContext.getTotalRequestCharge();
    Duration duration = diagnosticsContext.getDuration();

    recordOperation(
        client,
        diagnosticsContext,
        statusCode,
        subStatusCode,
        maxItemCount,
        actualItemCount,
        containerName,
        databaseName,
        operationType,
        isPointOperation,
        resourceType,
        effectiveConsistencyLevel,
        operationId,
        totalRequestCharge,
        duration
    );
}
// Walks the composite-registry tree and returns true as soon as a concrete
// (non-composite) MeterRegistry is found, or when the nesting exceeds 100
// levels (most likely a cycle - err on the side of assuming a registry exists).
// Returns false only when the whole tree consists of empty composites.
//
// Implemented iteratively with an explicit work stack (instead of recursion)
// because it runs under the class-level lock and deeply nested composites
// would otherwise grow the call stack. Fully-qualified java.util.ArrayDeque is
// used so no import change is required.
//
// compositeMeterRegistry - root of the (sub)tree to scan
// depth                  - nesting level of the root (1 for the top-level call)
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    java.util.ArrayDeque<CompositeMeterRegistry> pendingRegistries = new java.util.ArrayDeque<>();
    java.util.ArrayDeque<Integer> pendingDepths = new java.util.ArrayDeque<>();
    pendingRegistries.push(compositeMeterRegistry);
    pendingDepths.push(depth);

    while (!pendingRegistries.isEmpty()) {
        CompositeMeterRegistry current = pendingRegistries.pop();
        int currentDepth = pendingDepths.pop();

        if (currentDepth > 100) {
            // Same cycle guard as the previous recursive implementation.
            return true;
        }

        for (MeterRegistry registry : current.getRegistries()) {
            if (registry instanceof CompositeMeterRegistry) {
                pendingRegistries.push((CompositeMeterRegistry) registry);
                pendingDepths.push(currentDepth + 1);
            } else {
                // Found a concrete registry - metrics will actually be published.
                return true;
            }
        }
    }

    return false;
}
// Core operation-metric entry point: builds the operation-level tag set and
// delegates to an OperationMetricProducer to emit all enabled meters.
// No-op when no concrete meter registry is attached or client telemetry
// metrics are disabled for this client.
private static void recordOperation(
CosmosAsyncClient client,
CosmosDiagnosticsContext diagnosticsContext,
int statusCode,
int subStatusCode,
Integer maxItemCount,
Integer actualItemCount,
String containerId,
String databaseId,
String operationType,
boolean isPointOperation,
String resourceType,
ConsistencyLevel consistencyLevel,
String operationId,
float requestCharge,
Duration latency
) {
boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
return;
}
Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
String accountTagValue = clientAccessor.getAccountTagValue(client);
EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);
// Contacted regions are only collected when the OperationDetails category is on.
Set<String> contactedRegions = Collections.emptySet();
if (metricCategories.contains(MetricCategory.OperationDetails)) {
contactedRegions = diagnosticsContext.getContactedRegionNames();
}
Tags operationTags = createOperationTags(
metricTagNames,
statusCode,
subStatusCode,
containerId,
databaseId,
operationType,
resourceType,
consistencyLevel,
operationId,
isPointOperation,
contactedRegions,
clientCorrelationTag,
accountTagValue
);
OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
// -1 signals "not applicable" (e.g. point operations have no item counts).
metricProducer.recordOperation(
client,
requestCharge,
latency,
maxItemCount == null ? -1 : maxItemCount,
actualItemCount == null ? -1: actualItemCount,
diagnosticsContext,
contactedRegions
);
}
/**
 * Creates a per-endpoint RNTBD metrics completion recorder bound to the
 * shared composite meter registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {

    return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Registers a meter registry with the shared composite registry.
 * Registries are reference-counted; only the first add() for a given
 * registry actually attaches it and updates the CPU/memory meter options.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    AtomicLong refCount = registryRefCount
        .computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0));

    // Only the transition 0 -> 1 wires the registry up.
    if (refCount.incrementAndGet() != 1L) {
        return;
    }

    ClientTelemetryMetrics.compositeRegistry.add(registry);
    ClientTelemetryMetrics.cpuOptions = cpuOptions;
    ClientTelemetryMetrics.memoryOptions = memoryOptions;

    // Invalidate the cached descendant-validation result.
    lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
/**
 * Unregisters a meter registry from the shared composite registry.
 * Counterpart to {@link #add}; only the last remove() for a given registry
 * (ref-count reaching zero) clears, closes and detaches it.
 *
 * Fixes vs. previous version:
 * - remove() for a registry that was never add()ed no longer throws NPE
 *   (registryRefCount.get(...) was dereferenced unchecked).
 * - The ref-count map entry is now dropped at zero so the map cannot grow
 *   unboundedly across add/remove cycles.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // remove() without a matching add(); nothing to clean up.
        return;
    }

    if (refCount.decrementAndGet() == 0L) {
        // Drop the bookkeeping entry so repeated add/remove cycles don't leak.
        registryRefCount.remove(registry);

        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);

        // Replace an empty composite with a fresh instance.
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }

        // Invalidate the cached descendant-validation result.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes a tag value so it is safe to use in meter tag strings.
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set; each tag is only emitted when its
 * {@link TagName} is enabled in {@code metricTagNames}.
 *
 * Bug fix: the Container tag value previously relied on
 * {@code ... + databaseId != null ? ... : ...} which, due to Java operator
 * precedence ({@code +} binds tighter than {@code ?:}), evaluated
 * {@code (account + "/" + databaseId) != null} — always true — and produced a
 * wrong tag value. The ternaries are now parenthesized.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {

    List<Tag> effectiveTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }

    if (metricTagNames.contains(TagName.Container)) {
        // account/database/container - missing segments become "NONE".
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");

        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }

    if (metricTagNames.contains(TagName.Operation)) {
        // Non-point operations with an operation id (e.g. queries) include it.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;

        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }

    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }

    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }

    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }

    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {

        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }

    return Tags.of(effectiveTags);
}
/**
 * Filters out tags whose names are suppressed in the given meter options.
 * Returns the input unchanged when nothing is suppressed.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();

    // Fast path: no suppression configured.
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    Set<String> suppressedKeys = new HashSet<>();
    for (TagName suppressed : suppressedTags) {
        suppressedKeys.add(suppressed.name());
    }

    List<Tag> kept = new ArrayList<>();
    for (Tag candidate : tags) {
        boolean isSuppressed = suppressedKeys.contains(candidate.getKey());
        if (!isSuppressed) {
            kept.add(candidate);
        }
    }

    return Tags.of(kept);
}
/**
 * Emits all meters for a single completed operation: operation-level
 * calls/RU/latency/item counts plus per-request (direct & gateway)
 * diagnostics, scoped to a pre-built operation tag set.
 */
private static class OperationMetricProducer {
    // Tag names enabled for this client.
    private final EnumSet<TagName> metricTagNames;
    // Metric categories enabled for this client (gates optional meter groups).
    private final EnumSet<MetricCategory> metricCategories;
    // Operation-scoped tags shared by every meter this producer emits.
    private final Tags operationTags;

    public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
        this.metricCategories = metricCategories;
        this.metricTagNames = metricTagNames;
        this.operationTags = operationTags;
    }
/**
 * Records the full metric set for one operation: summary meters
 * (calls, RU charge, latency), optional OperationDetails meters
 * (regions contacted, item counts) and, per CosmosDiagnostics instance,
 * store/gateway/address-resolution request statistics and query-plan
 * diagnostics.
 *
 * @param maxItemCount -1 when not applicable
 * @param actualItemCount -1 when not applicable
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {

    // Operation call counter.
    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }

    // Operation RU charge distribution; values are capped at 100k RU.
    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }

    // Optional OperationDetails meters (regions contacted, item counts).
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);

            // Only recorded when at least one region was contacted.
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }

        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }

    // End-to-end operation latency.
    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }

    // Per-request metrics from each diagnostics instance of this operation.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }

        // Query plan diagnostics only exist for feed/query responses.
        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);
        if (feedDiagnostics == null) {
            continue;
        }

        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();
        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Records gateway request count, latency and request-timeline meters for a
 * query-plan retrieval. No-op when there are no query-plan diagnostics or
 * the RequestSummary category is disabled.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Query-plan requests get a dedicated operation-type tag.
    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );

    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    // Threshold filtering (when enabled) only records diagnostics-threshold
    // violating operations.
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }

    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }

    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Records request and response payload size distributions (bytes) for a
 * single request, subject to each meter's enablement and optional
 * diagnostic-threshold filtering.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
    if (reqSizeOptions.isEnabled() &&
        (!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        DistributionSummary requestPayloadSizeMeter = DistributionSummary
            .builder(reqSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Request payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, reqSizeOptions))
            .register(compositeRegistry);
        requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
    }

    CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
    if (rspSizeOptions.isEnabled() &&
        (!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        DistributionSummary responsePayloadSizeMeter = DistributionSummary
            .builder(rspSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Response payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, rspSizeOptions))
            .register(compositeRegistry);
        responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
    }
}
/**
 * Records max. and actual item count distributions for feed/query
 * operations. Skipped entirely unless a positive maxItemCount was supplied
 * and the OperationDetails category is enabled; values are clamped to
 * [0, 100000].
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
        if (maxItemCountOptions.isEnabled()) {
            DistributionSummary maxItemCountMeter = DistributionSummary
                .builder(maxItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Request max. item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, maxItemCountOptions))
                .register(compositeRegistry);
            maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
        }

        CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
        if (actualItemCountOptions.isEnabled()) {
            DistributionSummary actualItemCountMeter = DistributionSummary
                .builder(actualItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Response actual item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, actualItemCountOptions))
                .register(compositeRegistry);
            actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
        }
    }
}
/**
 * Builds per-request tags (partition key range, status codes, operation
 * type, region, service endpoint/address and partition/replica ids); each
 * tag is only emitted when enabled in {@code metricTagNames}. Missing
 * values map to "NONE".
 */
private Tags createRequestTags(
    EnumSet<TagName> metricTagNames,
    String pkRangeId,
    int statusCode,
    int subStatusCode,
    String resourceType,
    String operationType,
    String regionName,
    String serviceEndpoint,
    String serviceAddress
) {
    List<Tag> tags = new ArrayList<>();

    if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
        String pkRangeIdValue =
            Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId);
        tags.add(Tag.of(TagName.PartitionKeyRangeId.toString(), pkRangeIdValue));
    }

    if (metricTagNames.contains(TagName.RequestStatusCode)) {
        tags.add(Tag.of(TagName.RequestStatusCode.toString(), statusCode + "/" + subStatusCode));
    }

    if (metricTagNames.contains(TagName.RequestOperationType)) {
        tags.add(Tag.of(TagName.RequestOperationType.toString(), resourceType + "/" + operationType));
    }

    if (metricTagNames.contains(TagName.RegionName)) {
        tags.add(Tag.of(TagName.RegionName.toString(), regionName != null ? regionName : "NONE"));
    }

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
    if (metricTagNames.contains(TagName.ServiceAddress)) {
        tags.add(Tag.of(TagName.ServiceAddress.toString(), effectiveServiceAddress));
    }

    boolean wantsPartitionId = metricTagNames.contains(TagName.PartitionId);
    boolean wantsReplicaId = metricTagNames.contains(TagName.ReplicaId);
    if (wantsPartitionId || wantsReplicaId) {
        // Partition and replica ids are parsed out of the service address;
        // when parsing fails both stay "NONE".
        String partitionId = "NONE";
        String replicaId = "NONE";

        String[] parsed = StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
        if (parsed.length == 2) {
            partitionId = parsed[0];
            replicaId = parsed[1];
        }

        if (wantsPartitionId) {
            tags.add(Tag.of(TagName.PartitionId.toString(), partitionId));
        }

        if (wantsReplicaId) {
            tags.add(Tag.of(TagName.ReplicaId.toString(), replicaId));
        }
    }

    return Tags.of(tags);
}
/**
 * Builds the tag set for query-plan requests: just the fixed QUERYPLAN
 * operation-type tag, and only when that tag name is enabled.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    List<Tag> tags = new ArrayList<>();

    if (metricTagNames.contains(TagName.RequestOperationType)) {
        tags.add(QUERYPLAN_TAG);
    }

    return Tags.of(tags);
}
/**
 * Builds tags for address-resolution meters: target endpoint and the two
 * force-refresh flags, each emitted only when enabled in
 * {@code metricTagNames}.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> tags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        tags.add(Tag.of(TagName.IsForceRefresh.toString(), isForceRefresh ? "True" : "False"));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        tags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }

    return Tags.of(tags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Records one latency Timer per non-empty request-timeline event, using the
 * base meter name suffixed with the escaped event name. No-op when the
 * timeline is absent, the RequestDetails category is disabled, the meter is
 * disabled, or threshold filtering is on and no threshold was violated.
 *
 * Fix: the zero-duration skip used {@code duration == Duration.ZERO}, a
 * reference comparison that only matches the Duration.ZERO singleton and
 * misses computed zero-length durations; replaced with
 * {@code Duration.isZero()}.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {

    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }

    CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
        client,
        name);
    if (!timelineOptions.isEnabled() ||
        (timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {

        return;
    }

    for (RequestTimeline.Event event : requestTimeline) {
        Duration duration = event.getDuration();
        // Skip events without a duration or with a zero-length duration.
        if (duration == null || duration.isZero()) {
            continue;
        }

        Timer eventMeter = Timer
            .builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
            .description("Request timeline (" + event.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(timelineOptions.getPercentiles())
            .publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, timelineOptions))
            .register(compositeRegistry);
        eventMeter.record(duration);
    }
}
/**
 * Records direct-mode (RNTBD) per-request meters for each store response:
 * backend latency, RU charge, request latency, request count, optional
 * request timeline, payload sizes and legacy endpoint statistics. No-op
 * when the RequestSummary category is disabled.
 */
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {

    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();

        // Request-level tags layered on top of the operation tags.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );

        // Backend latency is only available for some responses.
        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
            }
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // RU values capped at 100k.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        // Timeline is an opt-in detail category.
        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }

        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );

        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
/**
 * Records gateway-mode per-request meters (request count, RU charge,
 * latency and request timeline) for each gateway statistics entry. No-op
 * for an empty list or when RequestSummary is disabled.
 *
 * @param latency client-side request duration shared across all gateway
 *                entries of this request; may be null
 */
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {

    if (gatewayStatisticsList == null
        || gatewayStatisticsList.size() == 0
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {

        return;
    }

    // Gateway requests have no region/endpoint/partition identity - strip
    // those tag names so no misleading tags get emitted.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);

    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,
                null,
                null)
        );

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // RU values capped at 100k.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
/**
 * Records address-resolution latency and request-count meters for each
 * completed address resolution. Inflight or unfinished resolutions
 * (null end time) are skipped. No-op for an empty map or when the
 * AddressResolutions category is disabled.
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {

    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.size() == 0
        || !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {

        return;
    }

    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
            : addressResolutionStatisticsMap.values()) {

        // Only completed resolutions have a meaningful duration.
        if (addressResolutionStatistics.isInflightRequest() ||
            addressResolutionStatistics.getEndTimeUTC() == null) {

            continue;
        }

        Tags addressResolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                addressResolutionStatistics.getTargetEndpoint(),
                addressResolutionStatistics.isForceRefresh(),
                addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
            )
        );

        Duration latency = Duration.between(
            addressResolutionStatistics.getStartTimeUTC(),
            addressResolutionStatistics.getEndTimeUTC());

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(addressResolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(latency);
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(addressResolutionTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
    }
}
}
/**
 * Per-endpoint RNTBD metrics recorder. The constructor eagerly registers
 * gauges/function counters for endpoint, channel and request state
 * (category-gated); request completions are recorded via markComplete.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    // Endpoint-scoped tags applied to all meters of this recorder.
    private final Tags tags;
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        // Request-level gauges: concurrent and queued request counts.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                     .description("RNTBD concurrent requests (executing or queued request count)")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                     .description("RNTBD queued request count")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }
        }

        // Client-wide endpoint gauges; note: intentionally untagged since
        // they aggregate across endpoints.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                     .description("RNTBD endpoint count")
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }

        // Channel lifecycle meters backed by durable endpoint metrics.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
    }
/**
 * Completes the given RNTBD request record. When direct-request metrics are enabled,
 * publishes the overall/success/failure latency timers and the request/response
 * payload-size distribution summaries for it; otherwise just stops the record.
 *
 * @param requestRecord the finished RNTBD request record; never null.
 */
public void markComplete(RntbdRequestRecord requestRecord) {
    if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
        // Timers stay null when the corresponding meter is disabled;
        // requestRecord.stop(...) is expected to tolerate null timers.
        Timer requests = null;
        Timer requestsSuccess = null;
        Timer requestsFailed = null;
        CosmosMeterOptions options = this.client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
        if (options.isEnabled()) {
            requests = Timer
                .builder(options.getMeterName().toString())
                .description("RNTBD request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(options.getPercentiles())
                .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(this.tags, options))
                .register(this.registry);
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
        if (options.isEnabled()) {
            requestsFailed = Timer
                .builder(options.getMeterName().toString())
                .description("RNTBD failed request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(options.getPercentiles())
                .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
        if (options.isEnabled()) {
            requestsSuccess = Timer
                .builder(options.getMeterName().toString())
                .description("RNTBD successful request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(options.getPercentiles())
                .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
        // Stop the record, feeding both the overall timer and exactly one of the
        // success/failure timers depending on how the request completed.
        requestRecord.stop(
            requests,
            requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
        if (options.isEnabled()) {
            // Payload-size summaries publish no percentiles/histograms by design (cardinality).
            DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                .description("RNTBD request size (bytes)")
                .baseUnit("bytes")
                .tags(getEffectiveTags(tags, options))
                .maximumExpectedValue(16_000_000d)
                .publishPercentileHistogram(false)
                .publishPercentiles()
                .register(registry);
            requestSize.record(requestRecord.requestLength());
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
        if (options.isEnabled()) {
            DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                .description("RNTBD response size (bytes)")
                .baseUnit("bytes")
                .tags(getEffectiveTags(tags, options))
                .maximumExpectedValue(16_000_000d)
                .publishPercentileHistogram(false)
                .publishPercentiles()
                .register(registry);
            responseSize.record(requestRecord.responseLength());
        }
    } else {
        // Direct-request metrics disabled - just complete the record without meters.
        requestRecord.stop();
    }
}
}
/**
 * Immutable pair of a cached descendant-registry validation outcome and the instant
 * at which that cached outcome expires.
 */
static class DescendantValidationResult {
    private final Instant expiration;
    private final boolean result;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.expiration = expiration;
        this.result = result;
    }

    /** Returns the instant after which this cached result should be re-evaluated. */
    public Instant getExpiration() {
        return expiration;
    }

    /** Returns the cached validation outcome. */
    public boolean getResult() {
        return result;
    }
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
// Bridge accessors for package-private state on the public client/diagnostics types.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
    ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Escapes tag values for meter tags; '_', '-', '/', '.' are kept as-is.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
// Shared composite registry all meters are registered against; replaced with a fresh
// instance once the last concrete registry is removed.
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
// Ref-count per attached registry so add()/remove() pairs from multiple clients balance out.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
// Meter options for the system CPU / free-memory meters; captured on registry add().
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
// Cached result of the "does the composite contain any real registry" check (volatile for
// lock-free reads); reset whenever the registry set changes.
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
// Query-plan requests always target DocumentCollection/QueryPlan, so the tag is a constant.
private static final Tag QUERYPLAN_TAG = Tag.of(
    TagName.RequestOperationType.toString(),
    ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders the full stack trace of the given throwable into a String.
 *
 * @param throwable the throwable whose stack trace is captured.
 * @return the printed stack trace.
 * @throws IllegalStateException if closing the in-memory writers fails (effectively never).
 */
private static String convertStackTraceToString(Throwable throwable) {
    try (StringWriter stringWriter = new StringWriter();
         PrintWriter printWriter = new PrintWriter(stringWriter)) {
        throwable.printStackTrace(printWriter);
        return stringWriter.toString();
    } catch (IOException ioException) {
        throw new IllegalStateException(ioException);
    }
}
/**
 * Creates a new, empty composite registry. When trace logging is enabled, every meter
 * registration is logged together with the call stack that triggered it (the dummy
 * exception only exists to capture that call stack).
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();

    if (logger.isTraceEnabled()) {
        freshRegistry.config().onMeterAdded(meter -> logger.trace(
            "Meter '{}' added. Callstack: {}",
            meter.getId().getName(),
            convertStackTraceToString(new IllegalStateException("Dummy"))));
    }

    return freshRegistry;
}
/**
 * Publishes system-wide CPU-load and free-memory distribution summaries.
 * No-op while no concrete registry is attached or before the cpu/memory meter
 * options have been initialized via a registry add().
 *
 * @param averageSystemCpuUsage average system CPU load in percent.
 * @param freeMemoryAvailableInMB free memory available in MB.
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    boolean notReady = compositeRegistry.getRegistries().isEmpty()
        || cpuOptions == null
        || memoryOptions == null;
    if (notReady) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary cpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        cpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        DistributionSummary freeMemoryMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records operation-level metrics for a completed operation, extracting all relevant
 * dimensions (status/sub-status codes, item counts, container/database names, operation
 * and resource type, consistency level, request charge, latency) from the diagnostics
 * context and delegating to the private overload.
 *
 * @param client the client the operation was executed on.
 * @param diagnosticsContext the diagnostics of the completed operation.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    recordOperation(
        client,
        diagnosticsContext,
        diagnosticsContext.getStatusCode(),
        diagnosticsContext.getSubStatusCode(),
        diagnosticsContext.getMaxItemCount(),
        diagnosticsContext.getActualItemCount(),
        diagnosticsContext.getContainerName(),
        diagnosticsContext.getDatabaseName(),
        diagnosticsContext.getOperationType(),
        diagnosticsContext.isPointOperation(),
        diagnosticsContext.getResourceType(),
        diagnosticsContext.getEffectiveConsistencyLevel(),
        diagnosticsContext.getOperationId(),
        diagnosticsContext.getTotalRequestCharge(),
        diagnosticsContext.getDuration()
    );
}
/**
 * Returns true when the composite registry (transitively) contains at least one
 * non-composite, i.e. concrete, meter registry. Recursion is capped at depth 100 to
 * guard against pathological/cyclic composite nesting; past the cap the registry is
 * conservatively assumed to contain a real registry.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        return true;
    }

    for (MeterRegistry nested : compositeMeterRegistry.getRegistries()) {
        boolean isComposite = nested instanceof CompositeMeterRegistry;
        if (!isComposite) {
            // Found a concrete registry.
            return true;
        }
        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) nested, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Records operation-level metrics with explicitly extracted dimensions. Skips recording
 * entirely when no concrete registry is attached or client-telemetry metrics are
 * disabled for this client.
 *
 * @param client the client the operation was executed on.
 * @param diagnosticsContext the diagnostics of the completed operation.
 * @param statusCode the operation-level HTTP status code.
 * @param subStatusCode the operation-level sub-status code.
 * @param maxItemCount the requested max item count (null for point operations).
 * @param actualItemCount the actual item count returned (null for point operations).
 * @param containerId the container name.
 * @param databaseId the database name.
 * @param operationType the operation type name.
 * @param isPointOperation whether this was a point operation.
 * @param resourceType the resource type name.
 * @param consistencyLevel the effective consistency level.
 * @param operationId the operation id (used in the Operation tag for non-point operations).
 * @param requestCharge the total RU charge of the operation.
 * @param latency the end-to-end operation latency.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // NOTE(review): the metrics-enabled flag is derived from shouldEnableEmptyPageDiagnostics -
    // presumably the two settings are coupled upstream; confirm against the accessor.
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }

    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);

    // Contacted regions are only resolved when the OperationDetails category is opted in.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }

    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );

    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
    // -1 signals "not applicable" (point operations carry no item counts).
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1: actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
/**
 * Creates a per-endpoint RNTBD metrics completion recorder bound to the shared
 * composite registry.
 *
 * @param client the RNTBD transport client.
 * @param endpoint the endpoint to record metrics for.
 * @return the completion recorder.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {

    RntbdMetricsCompletionRecorder recorder = new RntbdMetricsV2(compositeRegistry, client, endpoint);
    return recorder;
}
/**
 * Attaches a meter registry to the shared composite registry. The registry is only
 * physically attached (and the cpu/memory options captured) when its ref-count
 * transitions 0 -> 1; subsequent add() calls for the same registry just bump the count.
 *
 * NOTE(review): cpuOptions/memoryOptions are replaced globally each time any registry's
 * count hits 1; options passed on later add() calls for an already-attached registry are
 * ignored - confirm this is intended.
 *
 * @param registry the registry to attach.
 * @param cpuOptions meter options for the system CPU metric.
 * @param memoryOptions meter options for the free-memory metric.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    if (registryRefCount
        .computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0))
        .incrementAndGet() == 1L) {

        ClientTelemetryMetrics
            .compositeRegistry
            .add(registry);

        ClientTelemetryMetrics.cpuOptions = cpuOptions;
        ClientTelemetryMetrics.memoryOptions = memoryOptions;

        // Invalidate the cached "has real registry" check so it is re-evaluated.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Detaches a meter registry previously attached via
 * {@code add(MeterRegistry, CosmosMeterOptions, CosmosMeterOptions)}. The registry is
 * only physically closed and detached when its ref-count drops to zero.
 *
 * Fixes two defects of the previous implementation: an unmatched remove() (no prior
 * add()) caused a NullPointerException, and the zeroed ref-count entry was never removed
 * from the map, retaining a strong reference to the closed registry indefinitely.
 *
 * @param registry the registry to detach.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // remove() without a matching add() - nothing to detach.
        return;
    }

    if (refCount.decrementAndGet() == 0L) {
        // Drop the ref-count entry so the closed registry can be garbage collected;
        // a later add() for the same registry recreates it via computeIfAbsent.
        registryRefCount.remove(registry);

        registry.clear();
        registry.close();

        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);

        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            // Replace the composite wholesale so stale meters cannot resurface.
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }

        // Invalidate the cached "has real registry" check so it is re-evaluated.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes the given value so it is safe to use inside a metric tag value
 * ('_', '-', '/', '.' are kept unescaped).
 *
 * @param value the raw value; must not be null.
 * @return the escaped value.
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set, honoring only the tag names the client opted into.
 *
 * @param metricTagNames the tag names to emit.
 * @param statusCode operation-level HTTP status code.
 * @param subStatusCode operation-level sub-status code.
 * @param containerId container name (may be null).
 * @param databaseId database name (may be null).
 * @param operationType operation type name.
 * @param resourceType resource type name.
 * @param consistencyLevel effective consistency level; must not be null when the
 *        ConsistencyLevel tag is enabled.
 * @param operationId operation id (only used for non-point operations).
 * @param isPointOperation whether the operation was a point operation.
 * @param contactedRegions regions contacted (may be null/empty).
 * @param clientCorrelationTag pre-built client correlation tag.
 * @param accountTagValue account tag value.
 * @return the assembled tags.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }

    if (metricTagNames.contains(TagName.Container)) {
        // BUGFIX: the null checks must be parenthesized. Previously '+' bound tighter
        // than '?:', so the condition parsed as
        // '(escape(accountTagValue) + "/" + databaseId) != null' (always true) and the
        // whole tag value collapsed to escape(databaseId) - dropping the account and
        // container segments and throwing NPE for a null databaseId.
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");

        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }

    if (metricTagNames.contains(TagName.Operation)) {
        // Queries and other non-point operations include the operation id when present.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;

        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }

    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }

    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }

    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }

    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {

        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }

    return Tags.of(effectiveTags);
}
/**
 * Filters the given tags, dropping any tag whose key matches a suppressed tag name in
 * the meter options. Returns the input unchanged when nothing is suppressed.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    Set<String> suppressedKeys = new HashSet<>();
    for (TagName suppressed : suppressedTags) {
        suppressedKeys.add(suppressed.name());
    }

    List<Tag> retained = new ArrayList<>();
    for (Tag candidate : tags) {
        if (!suppressedKeys.contains(candidate.getKey())) {
            retained.add(candidate);
        }
    }

    return Tags.of(retained);
}
// Emits all operation-, request- and detail-level meters for one completed operation;
// a short-lived instance is created per recorded operation.
private static class OperationMetricProducer {
    private final EnumSet<TagName> metricTagNames; // tag dimensions the client opted into
    private final EnumSet<MetricCategory> metricCategories; // metric categories the client opted into
    private final Tags operationTags; // pre-built operation-level tags shared by all meters
// Captures the per-operation category/tag configuration used by all record* methods below.
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
    this.metricCategories = metricCategories;
    this.metricTagNames = metricTagNames;
    this.operationTags = operationTags;
}
/**
 * Emits all meters for one completed operation: summary call count, RU charge and
 * latency, optional operation details (regions contacted, item counts), and - per
 * captured diagnostics - the request-level store/gateway/address-resolution and
 * query-plan meters.
 *
 * @param cosmosAsyncClient the client the operation ran on (source of meter options).
 * @param requestCharge total RU charge of the operation.
 * @param latency end-to-end operation latency.
 * @param maxItemCount requested max item count, -1 when not applicable.
 * @param actualItemCount actual returned item count, -1 when not applicable.
 * @param diagnosticsContext diagnostics of the completed operation.
 * @param contactedRegions regions contacted (may be null/empty).
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {

    // Operation call counter.
    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }

    // RU charge summary - values are capped at 100,000 RU.
    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }

    // Optional operation details: regions contacted and item counts.
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }

        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }

    // End-to-end operation latency timer.
    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }

    // Request-level meters from every captured diagnostics instance.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {

                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }

        // Query-plan diagnostics only exist for feed/query responses.
        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);

        if (feedDiagnostics == null) {
            continue;
        }

        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();

        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Emits gateway request-count, latency and timeline meters for a query-plan request.
 * No-op when no query-plan diagnostics exist or the RequestSummary category is off.
 *
 * @param ctx diagnostics context (used for threshold-based filtering).
 * @param cosmosAsyncClient the client (source of meter options).
 * @param queryPlanDiagnostics the query-plan diagnostics; may be null.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Query-plan calls get the fixed DocumentCollection/QueryPlan request tag.
    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );

    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    // When diagnostic-threshold filtering is on, only record if the threshold was violated.
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }

    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }

    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Emits request- and response-payload size distribution summaries for a request,
 * each subject to its own meter options and threshold filtering.
 *
 * @param ctx diagnostics context (used for threshold-based filtering).
 * @param client the client (source of meter options).
 * @param requestPayloadSizeInBytes request payload size in bytes.
 * @param responsePayloadSizeInBytes response payload size in bytes.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    recordPayloadSize(
        ctx,
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST,
        "Request payload size in bytes",
        requestPayloadSizeInBytes);

    recordPayloadSize(
        ctx,
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE,
        "Response payload size in bytes",
        responsePayloadSizeInBytes);
}

// Shared implementation for the request/response payload-size summaries above.
private void recordPayloadSize(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName meterName,
    String description,
    int payloadSizeInBytes
) {
    CosmosMeterOptions options = clientAccessor.getMeterOptions(client, meterName);
    if (options.isEnabled() &&
        (!options.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        DistributionSummary payloadSizeMeter = DistributionSummary
            .builder(options.getMeterName().toString())
            .baseUnit("bytes")
            .description(description)
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, options))
            .register(compositeRegistry);
        payloadSizeMeter.record(payloadSizeInBytes);
    }
}
/**
 * Emits the max-item-count and actual-item-count distribution summaries. No-op unless
 * the operation carried a positive max item count and the OperationDetails category is
 * enabled.
 *
 * @param client the client (source of meter options).
 * @param maxItemCount requested max item count, -1 when not applicable.
 * @param actualItemCount actual returned item count.
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount <= 0 || !this.metricCategories.contains(MetricCategory.OperationDetails)) {
        return;
    }

    recordItemCountMetric(
        client,
        CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT,
        "Request max. item count",
        maxItemCount);

    recordItemCountMetric(
        client,
        CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT,
        "Response actual item count",
        actualItemCount);
}

// Shared implementation for the item-count summaries above; values are clamped to [0, 100000].
private void recordItemCountMetric(
    CosmosAsyncClient client,
    CosmosMetricName meterName,
    String description,
    int itemCount
) {
    CosmosMeterOptions options = clientAccessor.getMeterOptions(client, meterName);
    if (options.isEnabled()) {
        DistributionSummary itemCountMeter = DistributionSummary
            .builder(options.getMeterName().toString())
            .baseUnit("item count")
            .description(description)
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, options))
            .register(compositeRegistry);
        itemCountMeter.record(Math.max(0, Math.min(itemCount, 100_000d)));
    }
}
/**
 * Builds the request-level tag set (partition key range, status codes, operation type,
 * region, service endpoint/address and - when derivable - partition/replica ids),
 * honoring only the tag names in {@code metricTagNames}.
 *
 * @param metricTagNames the tag names to emit.
 * @param pkRangeId partition key range id (may be null/blank).
 * @param statusCode request HTTP status code.
 * @param subStatusCode request sub-status code.
 * @param resourceType resource type name.
 * @param operationType operation type name.
 * @param regionName region the request was sent to (may be null).
 * @param serviceEndpoint service endpoint authority (may be null).
 * @param serviceAddress service physical address path (may be null).
 * @return the assembled tags.
 */
private Tags createRequestTags(
    EnumSet<TagName> metricTagNames,
    String pkRangeId,
    int statusCode,
    int subStatusCode,
    String resourceType,
    String operationType,
    String regionName,
    String serviceEndpoint,
    String serviceAddress
) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
        effectiveTags.add(Tag.of(
            TagName.PartitionKeyRangeId.toString(),
            Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
    }

    if (metricTagNames.contains(TagName.RequestStatusCode)) {
        effectiveTags.add(Tag.of(
            TagName.RequestStatusCode.toString(),
            statusCode + "/" + subStatusCode));
    }

    if (metricTagNames.contains(TagName.RequestOperationType)) {
        effectiveTags.add(Tag.of(
            TagName.RequestOperationType.toString(),
            resourceType + "/" + operationType));
    }

    if (metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            regionName != null ? regionName : "NONE"));
    }

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceEndpoint.toString(),
            serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
    }

    String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
    if (metricTagNames.contains(TagName.ServiceAddress)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceAddress.toString(),
            effectiveServiceAddress));
    }

    // Partition and replica ids are parsed out of the (escaped) service address; when
    // parsing fails both fall back to "NONE".
    boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
    boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
    if (containsPartitionId || containsReplicaId) {
        String partitionId = "NONE";
        String replicaId = "NONE";
        String[] partitionAndReplicaId =
            StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
        if (partitionAndReplicaId.length == 2) {
            partitionId = partitionAndReplicaId[0];
            replicaId = partitionAndReplicaId[1];
        }

        if (containsPartitionId) {
            effectiveTags.add(Tag.of(
                TagName.PartitionId.toString(),
                partitionId));
        }

        if (containsReplicaId) {
            effectiveTags.add(Tag.of(
                TagName.ReplicaId.toString(),
                replicaId));
        }
    }

    return Tags.of(effectiveTags);
}
/**
 * Returns the request tags for a query-plan call: the constant
 * DocumentCollection/QueryPlan operation-type tag when that tag name is opted in,
 * otherwise an empty tag set.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    if (!metricTagNames.contains(TagName.RequestOperationType)) {
        return Tags.empty();
    }

    return Tags.of(QUERYPLAN_TAG);
}
/**
 * Builds the tag set for address-resolution meters (service endpoint plus the two
 * force-refresh flags), honoring only the tag names in {@code metricTagNames}.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> tags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        tags.add(Tag.of(
            TagName.IsForceRefresh.toString(),
            isForceRefresh ? "True" : "False"));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        tags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }

    return Tags.of(tags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Emits one latency timer per non-empty request-timeline event (meter name suffixed
 * with the escaped event name). No-op when no timeline exists, the RequestDetails
 * category is off, the meter is disabled, or threshold filtering suppresses it.
 *
 * Fixes {@code duration == Duration.ZERO}: a reference comparison only matches the
 * shared constant instance; the value-based {@link Duration#isZero()} correctly skips
 * every zero-length event.
 *
 * @param ctx diagnostics context (used for threshold-based filtering).
 * @param client the client (source of meter options).
 * @param name the timeline meter name (gateway or direct).
 * @param requestTimeline the timeline to record; may be null.
 * @param requestTags the request-level tags to attach.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {
    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }

    CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
        client,
        name);
    if (!timelineOptions.isEnabled() ||
        (timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
        return;
    }

    for (RequestTimeline.Event event : requestTimeline) {
        Duration duration = event.getDuration();
        // Skip events that carry no measurable duration (value-based zero check).
        if (duration == null || duration.isZero()) {
            continue;
        }

        Timer eventMeter = Timer
            .builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
            .description("Request timeline (" + event.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(timelineOptions.getPercentiles())
            .publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, timelineOptions))
            .register(compositeRegistry);
        eventMeter.record(duration);
    }
}
/**
 * Emits the direct (RNTBD) request-level meters - backend latency, RU charge, request
 * latency, request count, timeline, payload sizes and legacy endpoint statistics - for
 * every store response in the given list. No-op when the RequestSummary category is off.
 *
 * @param ctx diagnostics context (used for threshold-based filtering).
 * @param client the client (source of meter options).
 * @param storeResponseStatistics the per-request store response statistics.
 */
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();

        // Per-request tags layered on top of the operation-level tags.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );

        // Backend latency is only available when reported by the service.
        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();

        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            // When diagnostic-threshold filtering is on, only record on threshold violation.
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
            }
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // RU values are capped at 100,000 to keep the distribution bounded.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }

        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );

        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
/**
 * Emits request-summary metrics (request count, RU charge, latency) and the
 * request-timeline detail metrics for every gateway call captured in the
 * client-side request statistics.
 *
 * No-op when there are no gateway statistics or the RequestSummary metric
 * category is disabled for this operation.
 *
 * @param ctx                   diagnostics context used for threshold-based filtering
 * @param client                client whose meter options are consulted
 * @param latency               end-to-end duration of the request (may be null)
 * @param gatewayStatisticsList per-call gateway statistics (may be null/empty)
 */
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {

    if (gatewayStatisticsList == null
        || gatewayStatisticsList.size() == 0
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Gateway requests are not tied to a specific region/replica/partition, so those
    // tag dimensions are stripped from a clone of the configured tag names.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);

    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,
                null,
                null)
        );

        // Request count.
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        // RU charge distribution - recorded values are capped at 100,000 RU to bound
        // the histogram.
        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        // Latency - only when a duration was captured.
        // NOTE(review): the same overall request duration is recorded once per gateway
        // statistics entry; confirm this is intended when multiple entries exist.
        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        // RequestDetails category: per-stage timeline of the gateway call.
        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
/**
 * Emits latency and request-count meters for every completed address-resolution call
 * found in the request statistics. In-flight resolutions (or ones without an end time)
 * are skipped. No-op when the AddressResolutions metric category is disabled.
 *
 * @param ctx                            diagnostics context used for threshold filtering
 * @param client                         client whose meter options are consulted
 * @param addressResolutionStatisticsMap per-activity address resolution statistics
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {

    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.isEmpty()
        || !this.metricCategories.contains(MetricCategory.AddressResolutions)) {
        return;
    }

    for (ClientSideRequestStatistics.AddressResolutionStatistics stats
            : addressResolutionStatisticsMap.values()) {

        // Only completed resolutions carry a meaningful duration.
        if (stats.isInflightRequest() || stats.getEndTimeUTC() == null) {
            continue;
        }

        Tags resolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                stats.getTargetEndpoint(),
                stats.isForceRefresh(),
                stats.isForceCollectionRoutingMapRefresh()));

        Duration resolutionLatency = Duration.between(stats.getStartTimeUTC(), stats.getEndTimeUTC());

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        boolean latencyEnabled = latencyOptions.isEnabled()
            && (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
        if (latencyEnabled) {
            Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(resolutionTags, latencyOptions))
                .register(compositeRegistry)
                .record(resolutionLatency);
        }

        CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        boolean requestsEnabled = requestsOptions.isEnabled()
            && (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
        if (requestsEnabled) {
            Counter
                .builder(requestsOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(resolutionTags, requestsOptions))
                .register(compositeRegistry)
                .increment();
        }
    }
}
}
/**
 * Micrometer-based {@link RntbdMetricsCompletionRecorder} for a single RNTBD endpoint.
 *
 * The constructor registers the gauge-style meters (concurrent/queued requests,
 * endpoint and channel statistics) according to the metric categories enabled on the
 * transport client; {@link #markComplete(RntbdRequestRecord)} records the per-request
 * timers and payload-size distributions when a request finishes.
 *
 * NOTE(review): the gauges capture strong references to {@code endpoint} /
 * {@code client}; confirm meter lifetime matches endpoint lifetime.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    private final Tags tags;
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        // Per-request gauges: executing/queued request counts for this endpoint.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                     .description("RNTBD concurrent requests (executing or queued request count)")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                     .description("RNTBD queued request count")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }
        }

        // Endpoint-level gauges/counters (not tagged per endpoint).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                     .description("RNTBD endpoint count")
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                               .description("RNTBD endpoint eviction count")
                               .register(registry);
            }
        }

        // Channel-level meters backed by the endpoint's durable metrics.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                               .description("RNTBD acquired channel count")
                               .tags(getEffectiveTags(tags, options))
                               .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                               .description("RNTBD closed channel count")
                               .tags(getEffectiveTags(tags, options))
                               .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                     .description("RNTBD available channel count")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }
        }
    }

    /**
     * Records per-request meters (overall/success/failure latency plus request and
     * response payload sizes) and stops the request record's stopwatch. When the
     * DirectRequests category is disabled, only the stopwatch is stopped.
     */
    @Override
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;

            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            // Stop the stopwatch, routing the duration into the outcome-specific timer.
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                requestSize.record(requestRecord.requestLength());
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            requestRecord.stop();
        }
    }
}
/**
 * Immutable cache entry for the "does any concrete (non-composite) MeterRegistry
 * exist?" check: holds the cached answer plus the instant at which it becomes stale.
 * Thread-safe by immutability.
 */
static final class DescendantValidationResult {
    private final Instant expiration;
    private final boolean result;

    /**
     * @param expiration instant after which the cached result must be re-evaluated; not null
     * @param result     whether at least one concrete registry was found
     */
    public DescendantValidationResult(Instant expiration, boolean result) {
        if (expiration == null) {
            throw new NullPointerException("Argument 'expiration' must not be null.");
        }
        this.expiration = expiration;
        this.result = result;
    }

    public Instant getExpiration() {
        return this.expiration;
    }

    public boolean getResult() {
        return this.result;
    }
}
} |
So, the meter registry is a so-called CompositeMeterRegistry, which can contain other MeterRegistries - and those can be CompositeMeterRegistries again. If no actual non-composite MeterRegistry exists we can bail out and avoid the overhead of even emitting metrics. I am caching the result for 10 seconds as a compromise - would still react after 10 seconds when someone registers a new meter registry but avoid the relatively expensive check in the hot path. Are you asking why hard-coded 10 seconds vs. making it configurable? | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | nowSnapshot.plus(10, ChronoUnit.SECONDS), | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);

// Bridge accessors reaching package-private state of CosmosAsyncClient / CosmosDiagnostics.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
    ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();

// Escaper for tag values; '_', '-', '/' and '.' are left unescaped.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);

// Composite registry all meters are registered against; replaced with a fresh instance
// once the last child registry has been removed (see remove(...)).
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();

// Ref-count per child registry so balanced add/remove calls work across clients.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();

// Options for the system CPU / free-memory meters; assigned in add(...).
// NOTE(review): written under the class lock but read unsynchronized in
// recordSystemUsage - confirm the benign-race assumption is acceptable.
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;

// Cached result (with expiration) of the "any concrete registry?" check; volatile so
// the hot path can read it without locking.
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();

// Tag identifying query-plan gateway calls.
private static final Tag QUERYPLAN_TAG = Tag.of(
    TagName.RequestOperationType.toString(),
    ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
// Renders the full stack trace of the given throwable into a String.
private static String convertStackTraceToString(Throwable throwable) {
    try (StringWriter stackTraceWriter = new StringWriter();
         PrintWriter printWriter = new PrintWriter(stackTraceWriter)) {
        throwable.printStackTrace(printWriter);
        return stackTraceWriter.toString();
    } catch (IOException e) {
        // StringWriter.close() is a no-op, but the signature forces handling.
        throw new IllegalStateException(e);
    }
}
// Creates an empty CompositeMeterRegistry; when trace logging is on, every meter
// registration is logged together with the call stack that triggered it.
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();

    if (logger.isTraceEnabled()) {
        freshRegistry.config().onMeterAdded(meter -> logger.trace(
            "Meter '{}' added. Callstack: {}",
            meter.getId().getName(),
            convertStackTraceToString(new IllegalStateException("Dummy"))));
    }

    return freshRegistry;
}
/**
 * Records system-wide CPU and free-memory snapshots. Skipped entirely when no child
 * registry is attached or the meter options have not been initialized yet via add(...).
 *
 * @param averageSystemCpuUsage    average system CPU load in percent
 * @param freeMemoryAvailableInMB  free memory in megabytes
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records all metrics for a completed operation, extracting every dimension
 * (status codes, item counts, container/database, operation/resource type,
 * consistency level, RU charge, latency) from the diagnostics context and
 * delegating to the private recordOperation(...) overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    recordOperation(
        client,
        diagnosticsContext,
        diagnosticsContext.getStatusCode(),
        diagnosticsContext.getSubStatusCode(),
        diagnosticsContext.getMaxItemCount(),
        diagnosticsContext.getActualItemCount(),
        diagnosticsContext.getContainerName(),
        diagnosticsContext.getDatabaseName(),
        diagnosticsContext.getOperationType(),
        diagnosticsContext.isPointOperation(),
        diagnosticsContext.getResourceType(),
        diagnosticsContext.getEffectiveConsistencyLevel(),
        diagnosticsContext.getOperationId(),
        diagnosticsContext.getTotalRequestCharge(),
        diagnosticsContext.getDuration()
    );
}
// Depth-first search through nested CompositeMeterRegistry instances; returns true as
// soon as a concrete (non-composite) registry is found. Fails open (returns true) past
// depth 100 to guard against pathologically deep or cyclic nesting.
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        return true;
    }

    for (MeterRegistry childRegistry : compositeMeterRegistry.getRegistries()) {
        boolean isConcrete = !(childRegistry instanceof CompositeMeterRegistry);
        if (isConcrete
            || hasAnyActualMeterRegistryCore((CompositeMeterRegistry) childRegistry, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Core operation-metrics entry point: bails out early when no concrete registry exists
 * or client telemetry metrics are disabled, builds the operation-level tag set, and
 * delegates meter emission to an OperationMetricProducer.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // NOTE(review): 'shouldEnableEmptyPageDiagnostics' is used as the metrics on/off
    // switch here - confirm the accessor's semantics match this usage.
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }

    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);

    // Contacted regions are only collected when the OperationDetails category is on.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }

    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );

    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1: actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
// Factory for the per-endpoint RNTBD metrics recorder; meters are registered against
// the shared composite registry.
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {
    return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Attaches a MeterRegistry to the shared composite registry. Calls are ref-counted per
 * registry instance: only the first add for a given registry actually attaches it (and
 * updates the CPU/memory meter options); matching remove(...) calls detach it again.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    if (registryRefCount
        .computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0))
        .incrementAndGet() == 1L) {

        ClientTelemetryMetrics
            .compositeRegistry
            .add(registry);

        // NOTE(review): the most recent first-time add(...) wins for the CPU/memory
        // options even when multiple registries are attached - confirm intended.
        ClientTelemetryMetrics.cpuOptions = cpuOptions;
        ClientTelemetryMetrics.memoryOptions = memoryOptions;

        // Invalidate the cached "any concrete registry?" answer.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Decrements the ref-count for the given registry; when it drops to zero the registry
 * is cleared, closed and detached from the composite registry, and the ref-count entry
 * is discarded. Unbalanced calls (remove without a prior add) are ignored instead of
 * failing with a NullPointerException.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // remove() without a matching add() - nothing to do.
        return;
    }

    if (refCount.decrementAndGet() == 0L) {
        // Drop the map entry as well. Previously the entry was retained forever,
        // leaking the MeterRegistry key and - after an extra unbalanced remove -
        // leaving a negative count that prevented a later add() from re-attaching
        // the registry.
        registryRefCount.remove(registry);

        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);

        // Once the last registry is gone, start over with a pristine composite so
        // stale meters do not linger.
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }

        // Invalidate the cached "any concrete registry?" answer.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
// Percent-escapes a value ('_', '-', '/' and '.' are kept as-is) so it is safe to use
// as a metric tag value.
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set; only dimensions enabled via {@code metricTagNames}
 * are emitted. Missing database/container names are rendered as "NONE".
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {

    List<Tag> effectiveTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }

    if (metricTagNames.contains(TagName.Container)) {
        // BUGFIX: the ternaries must be parenthesized. '+' binds tighter than '?:' and
        // '!=', so the original expression collapsed to
        //   (escape(account) + "/" + databaseId) != null ? escape(databaseId) : ...
        // which always took the first branch, dropped the account/container parts and
        // threw a NullPointerException when databaseId was null.
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");

        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }

    if (metricTagNames.contains(TagName.Operation)) {
        // Queries (non-point operations) with an operation id get it appended.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;

        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }

    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }

    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }

    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }

    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {

        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }

    return Tags.of(effectiveTags);
}
// Returns 'tags' with every tag whose key matches a suppressed TagName removed; when
// the meter options suppress nothing, the input is returned untouched.
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    HashSet<String> suppressedKeys = new HashSet<>();
    for (TagName suppressed : suppressedTags) {
        suppressedKeys.add(suppressed.name());
    }

    List<Tag> filteredTags = new ArrayList<>();
    tags.forEach(filteredTags::add);
    filteredTags.removeIf(candidate -> suppressedKeys.contains(candidate.getKey()));

    return Tags.of(filteredTags);
}
// Produces all operation-scoped meters for a single completed operation, using the
// pre-computed operation-level tag set.
private static class OperationMetricProducer {
    private final EnumSet<TagName> metricTagNames;          // tag dimensions enabled on the client
    private final EnumSet<MetricCategory> metricCategories; // metric categories enabled on the client
    private final Tags operationTags;                       // operation-level tags shared by all meters
// Captures the enabled categories/tag names and the pre-built operation tags.
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
    this.metricCategories = metricCategories;
    this.metricTagNames = metricTagNames;
    this.operationTags = operationTags;
}
/**
 * Emits all operation-level meters (call count, RU charge, latency, optional
 * operation-detail meters) and then walks every CosmosDiagnostics instance to emit
 * the per-request direct/gateway/address-resolution and query-plan metrics.
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {

    // Operation call count.
    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }

    // Total RU charge for the operation, capped at 100,000 RU.
    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }

    // Optional operation-detail meters: contacted regions and item counts.
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }

        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }

    // End-to-end operation latency.
    // NOTE(review): latency is recorded without a null check here (unlike the
    // request-level latency paths) - confirm callers always provide a duration.
    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }

    // Per-request metrics: direct (store), gateway, address-resolution and query-plan.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {

                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }

        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);

        if (feedDiagnostics == null) {
            continue;
        }

        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();

        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Emits gateway request-count/latency meters and the request timeline for the
 * query-plan call, when query-plan diagnostics exist and the RequestSummary
 * category is enabled.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );

    // Query-plan calls are counted as gateway requests.
    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }

    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }

    // RequestDetails category: per-stage timeline of the query-plan call.
    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Records request/response payload-size distributions (bytes) for a single request,
 * subject to the per-meter enablement and diagnostic-threshold filtering.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
    if (reqSizeOptions.isEnabled() &&
        (!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        DistributionSummary requestPayloadSizeMeter = DistributionSummary
            .builder(reqSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Request payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, reqSizeOptions))
            .register(compositeRegistry);
        requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
    }

    CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
    if (rspSizeOptions.isEnabled() &&
        (!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        DistributionSummary responsePayloadSizeMeter = DistributionSummary
            .builder(rspSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Response payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, rspSizeOptions))
            .register(compositeRegistry);
        responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
    }
}
/**
 * Records the requested max item count and the actual item count for feed/query
 * operations. Skipped for point operations (maxItemCount <= 0) or when the
 * OperationDetails category is disabled; values are clamped into [0, 100000].
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
        if (maxItemCountOptions.isEnabled()) {
            DistributionSummary maxItemCountMeter = DistributionSummary
                .builder(maxItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Request max. item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, maxItemCountOptions))
                .register(compositeRegistry);
            maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
        }

        CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
        if (actualItemCountOptions.isEnabled()) {
            DistributionSummary actualItemCountMeter = DistributionSummary
                .builder(actualItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Response actual item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, actualItemCountOptions))
                .register(compositeRegistry);
            actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
        }
    }
}
/**
 * Builds the request-level tag set for a single backend/gateway request. Only the
 * dimensions enabled via {@code metricTagNames} are emitted; missing values are
 * rendered as "NONE". Partition and replica ids are parsed from the service address
 * when those tag dimensions are enabled.
 */
private Tags createRequestTags(
    EnumSet<TagName> metricTagNames,
    String pkRangeId,
    int statusCode,
    int subStatusCode,
    String resourceType,
    String operationType,
    String regionName,
    String serviceEndpoint,
    String serviceAddress
) {
    List<Tag> effectiveTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
        effectiveTags.add(Tag.of(
            TagName.PartitionKeyRangeId.toString(),
            Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
    }

    if (metricTagNames.contains(TagName.RequestStatusCode)) {
        // Combined "status/subStatus" value, e.g. "429/3200".
        effectiveTags.add(Tag.of(
            TagName.RequestStatusCode.toString(),
            statusCode + "/" + subStatusCode));
    }

    if (metricTagNames.contains(TagName.RequestOperationType)) {
        effectiveTags.add(Tag.of(
            TagName.RequestOperationType.toString(),
            resourceType + "/" + operationType));
    }

    if (metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            regionName != null ? regionName : "NONE"));
    }

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceEndpoint.toString(),
            serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
    }

    String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";

    if (metricTagNames.contains(TagName.ServiceAddress)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceAddress.toString(),
            effectiveServiceAddress));
    }

    // Partition/replica ids are both derived from the service address; parse it once
    // when either dimension is enabled.
    boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
    boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);

    if (containsPartitionId || containsReplicaId) {
        String partitionId = "NONE";
        String replicaId = "NONE";
        String[] partitionAndReplicaId =
            StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
        if (partitionAndReplicaId.length == 2) {
            partitionId = partitionAndReplicaId[0];
            replicaId = partitionAndReplicaId[1];
        }

        if (containsPartitionId) {
            effectiveTags.add(Tag.of(
                TagName.PartitionId.toString(),
                partitionId));
        }

        if (containsReplicaId) {
            effectiveTags.add(Tag.of(
                TagName.ReplicaId.toString(),
                replicaId));
        }
    }

    return Tags.of(effectiveTags);
}
/**
 * Tags for query-plan requests: only the request-operation-type tag applies and it
 * is always the fixed DocumentCollection/QueryPlan value.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    return metricTagNames.contains(TagName.RequestOperationType)
        ? Tags.of(QUERYPLAN_TAG)
        : Tags.empty();
}
/**
 * Builds the tag set for address-resolution meters: target service endpoint and the
 * two force-refresh flags, each emitted only when enabled via {@code metricTagNames}.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> tags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        tags.add(Tag.of(TagName.IsForceRefresh.toString(), isForceRefresh ? "True" : "False"));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        tags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }

    return Tags.of(tags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Records one Timer per request-timeline event; the meter name is the configured
 * base name suffixed with the escaped event name. Events with a null or zero
 * duration are skipped.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {

    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }

    CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(client, name);
    boolean shouldRecord = timelineOptions.isEnabled()
        && (!timelineOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
    if (!shouldRecord) {
        return;
    }

    for (RequestTimeline.Event event : requestTimeline) {
        Duration eventDuration = event.getDuration();
        // Reference comparison against the Duration.ZERO singleton preserved from
        // the original implementation.
        if (eventDuration == null || eventDuration == Duration.ZERO) {
            continue;
        }

        Timer eventMeter = Timer
            .builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
            .description("Request timeline (" + event.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(timelineOptions.getPercentiles())
            .publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, timelineOptions))
            .register(compositeRegistry);
        eventMeter.record(eventDuration);
    }
}
/**
 * Emits the per-request direct (RNTBD) summary meters - backend latency, request
 * charge, request latency and request count - for every store response captured in
 * the diagnostics, plus the request timeline, payload sizes and endpoint statistics.
 * Each meter is gated on being enabled and, when diagnostic-threshold filtering is
 * configured, on the thresholds actually having been violated.
 */
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }
    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();
        // Request-level tags (status code, pk range, region, replica path, ...)
        // layered on top of the operation-level tags.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );
        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                // Reuse the already retrieved (and null-checked) value instead of
                // calling getBackendLatencyInMs() a second time.
                backendRequestLatencyMeter.record(backendLatency);
            }
        }
        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamp so outliers cannot distort the distribution.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }
        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );
        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
/**
 * Emits the gateway-mode request summary meters (request count, RU charge, latency)
 * plus the gateway request timeline for each captured GatewayStatistics entry.
 * No-op when there are no gateway statistics or the RequestSummary category is
 * disabled. Each meter is additionally gated on being enabled and - when
 * diagnostic-threshold filtering is configured - on the thresholds having been
 * violated.
 */
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
    if (gatewayStatisticsList == null
        || gatewayStatisticsList.size() == 0
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }
    // Direct-mode-only tags carry no meaning for gateway calls; strip them from a
    // clone so the shared EnumSet is not mutated.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);
    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        // Request-level tags; region/endpoint/address are passed as null since the
        // corresponding tag names were removed above.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,
                null,
                null)
        );
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamped so outliers cannot distort the distribution.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }
        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }
        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
/**
 * Records latency and request-count meters for completed address-resolution calls.
 * In-flight or end-time-less entries are skipped; no-op when the map is empty or
 * the AddressResolutions metric category is disabled.
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {
    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.size() == 0
        || !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
        return;
    }
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
        : addressResolutionStatisticsMap.values()) {
        // Only completed resolutions have a meaningful latency.
        if (addressResolutionStatistics.isInflightRequest() ||
            addressResolutionStatistics.getEndTimeUTC() == null) {
            continue;
        }
        Tags addressResolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                addressResolutionStatistics.getTargetEndpoint(),
                addressResolutionStatistics.isForceRefresh(),
                addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
            )
        );
        Duration latency = Duration.between(
            addressResolutionStatistics.getStartTimeUTC(),
            addressResolutionStatistics.getEndTimeUTC());
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        // Both meters are gated on being enabled and - when threshold filtering is
        // configured - on the diagnostics thresholds having been violated.
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(addressResolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(latency);
        }
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(addressResolutionTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
    }
}
}
/**
 * Per-endpoint RNTBD metrics recorder. On construction it registers the gauges and
 * function counters that track endpoint/channel state; {@link #markComplete} records
 * the per-request latency and payload-size meters when a request finishes.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    // Tags identifying the endpoint this recorder is bound to.
    private final Tags tags;
    private final MeterRegistry registry;
    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;
        // Gauges for executing/queued request counts per endpoint.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                    .description("RNTBD concurrent requests (executing or queued request count)")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                    .description("RNTBD queued request count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
        // Client-wide endpoint count/eviction meters (no endpoint tags).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                    .description("RNTBD endpoint count")
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }
        // Channel lifecycle meters, backed by the endpoint's durable metrics so the
        // values survive endpoint re-creation.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
    }
    /**
     * Stops the request record's timers and records request/response payload sizes.
     * When the DirectRequests category is disabled, only the record's own stop()
     * is invoked (no meters are touched).
     */
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            // Record overall latency plus the outcome-specific timer; disabled
            // timers stay null and are expected to be tolerated by stop().
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                requestSize.record(requestRecord.requestLength());
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            requestRecord.stop();
        }
    }
}
// Immutable holder pairing a validation outcome with the instant at which the
// cached outcome expires and must be re-evaluated.
static class DescendantValidationResult {
    private final boolean result;
    private final Instant expiration;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.expiration = expiration;
        this.result = result;
    }

    // Point in time until which this cached result may be reused.
    public Instant getExpiration() {
        return this.expiration;
    }

    // Outcome of the validation this instance caches.
    public boolean getResult() {
        return this.result;
    }
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
private static final Tag QUERYPLAN_TAG = Tag.of(
TagName.RequestOperationType.toString(),
ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders a throwable's full stack trace into a String.
 * StringWriter.close() declares IOException, hence the catch that rewraps it
 * as an IllegalStateException.
 */
private static String convertStackTraceToString(Throwable throwable) {
    try (StringWriter stackTraceWriter = new StringWriter();
         PrintWriter stackTracePrinter = new PrintWriter(stackTraceWriter)) {
        throwable.printStackTrace(stackTracePrinter);
        return stackTraceWriter.toString();
    } catch (IOException ioException) {
        throw new IllegalStateException(ioException);
    }
}
/**
 * Creates a new, empty composite registry. When trace logging is enabled, every
 * meter registration is logged together with the call stack that triggered it,
 * which helps track down unexpected meter creation.
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();

    if (logger.isTraceEnabled()) {
        freshRegistry.config().onMeterAdded(meter -> logger.trace(
            "Meter '{}' added. Callstack: {}",
            meter.getId().getName(),
            convertStackTraceToString(new IllegalStateException("Dummy"))));
    }

    return freshRegistry;
}
/**
 * Records system-wide CPU usage and free-memory distribution summaries.
 * No-op while no registry is attached or the meter options have not been
 * initialized yet (i.e. before the first add()).
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    boolean notReady = compositeRegistry.getRegistries().isEmpty()
        || cpuOptions == null
        || memoryOptions == null;
    if (notReady) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary cpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        cpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        DistributionSummary freeMemoryMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records all operation-level meters for a completed operation by extracting the
 * relevant values (status/sub-status code, item counts, container/database names,
 * operation and resource types, consistency level, charge and duration) from the
 * diagnostics context and delegating to the private overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    recordOperation(
        client,
        diagnosticsContext,
        diagnosticsContext.getStatusCode(),
        diagnosticsContext.getSubStatusCode(),
        diagnosticsContext.getMaxItemCount(),
        diagnosticsContext.getActualItemCount(),
        diagnosticsContext.getContainerName(),
        diagnosticsContext.getDatabaseName(),
        diagnosticsContext.getOperationType(),
        diagnosticsContext.isPointOperation(),
        diagnosticsContext.getResourceType(),
        diagnosticsContext.getEffectiveConsistencyLevel(),
        diagnosticsContext.getOperationId(),
        diagnosticsContext.getTotalRequestCharge(),
        diagnosticsContext.getDuration()
    );
}
/**
 * Returns true when the composite registry (searched recursively) contains at
 * least one non-composite, i.e. "actual", meter registry.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    // Defensive cut-off for pathological registry graphs (e.g. cycles or extreme
    // nesting); treats them as if a real registry exists.
    if (depth > 100) {
        return true;
    }

    for (MeterRegistry childRegistry : compositeMeterRegistry.getRegistries()) {
        boolean isComposite = childRegistry instanceof CompositeMeterRegistry;
        if (!isComposite) {
            // Found a concrete registry.
            return true;
        }

        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) childRegistry, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Records the operation-level meters (calls, RU charge, latency, item counts,
 * contacted regions, ...) for a completed operation. No-op when no actual meter
 * registry is attached or client telemetry metrics are disabled for this client.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // NOTE(review): the "metrics enabled" flag is read from
    // shouldEnableEmptyPageDiagnostics(client) - confirm this accessor is really
    // the intended source for enabling client telemetry metrics.
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }
    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);
    // Contacted regions are only needed when the OperationDetails category is on.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }
    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );
    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
    // -1 marks "not applicable" (e.g. point operations have no item counts).
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1: actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
/**
 * Creates a per-endpoint RNTBD metrics completion recorder bound to the shared
 * composite meter registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {
    return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Attaches a MeterRegistry to the shared composite registry. Registrations are
 * reference-counted per registry instance: only the first add() for a given
 * registry actually attaches it (and stores the CPU/memory meter options).
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    AtomicLong refCount = registryRefCount.computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0));
    boolean isFirstReference = refCount.incrementAndGet() == 1L;

    if (isFirstReference) {
        ClientTelemetryMetrics.compositeRegistry.add(registry);
        ClientTelemetryMetrics.cpuOptions = cpuOptions;
        ClientTelemetryMetrics.memoryOptions = memoryOptions;

        // Invalidate the cached descendant-validation result so the next check
        // re-evaluates against the changed registry set.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Detaches a MeterRegistry previously registered via add(). Reference-counted:
 * only when the count drops to zero is the registry cleared, closed and removed
 * from the composite registry.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // Defensive: remove() without a matching add() previously threw a
        // NullPointerException here; treat it as a no-op instead.
        return;
    }

    if (refCount.decrementAndGet() == 0L) {
        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);
        // Recreate the composite once the last registry is gone so stale meters
        // from removed registries are not kept alive.
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }
        // Invalidate the cached descendant-validation result so the next check
        // re-evaluates against the changed registry set.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes the given value so it is safe to use as a meter tag value;
 * '_', '-', '/' and '.' are preserved as-is (see PERCENT_ESCAPER).
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set (client correlation id, container path,
 * operation, status/sub-status codes, consistency level, contacted regions).
 * Only tags enabled via {@code metricTagNames} are emitted.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }
    if (metricTagNames.contains(TagName.Container)) {
        // BUGFIX: the ternaries must be parenthesized. Without parentheses,
        // '+' and '!=' bind tighter than '?:', so the old expression parsed as
        // ((escape(account) + "/" + databaseId) != null ? escape(databaseId) : ...),
        // whose condition is always true - NPE when databaseId is null and the
        // container id was never appended.
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }
    if (metricTagNames.contains(TagName.Operation)) {
        // Queries/feeds with an operation id get it appended to the tag value.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }
    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }
    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }
    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }
    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }
    return Tags.of(effectiveTags);
}
/**
 * Filters out any tags whose keys are suppressed for the given meter; returns the
 * input unchanged when nothing is suppressed.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    HashSet<String> suppressedKeys = new HashSet<>();
    for (TagName suppressedTagName : suppressedTags) {
        suppressedKeys.add(suppressedTagName.name());
    }

    List<Tag> remainingTags = new ArrayList<>();
    for (Tag candidate : tags) {
        if (!suppressedKeys.contains(candidate.getKey())) {
            remainingTags.add(candidate);
        }
    }

    return Tags.of(remainingTags);
}
private static class OperationMetricProducer {
private final EnumSet<TagName> metricTagNames;
private final EnumSet<MetricCategory> metricCategories;
private final Tags operationTags;
// Captures the per-operation context (enabled metric categories, enabled tag names
// and the pre-computed operation-level tags) used by this producer's record methods.
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
    this.metricCategories = metricCategories;
    this.metricTagNames = metricTagNames;
    this.operationTags = operationTags;
}
/**
 * Records all operation-level meters (call count, RU charge, latency, item counts,
 * contacted regions) and then walks the operation's diagnostics to record the derived
 * per-request meters (store responses, gateway calls, address resolutions, query plans).
 *
 * @param cosmosAsyncClient client owning the meter configuration
 * @param requestCharge total RU charge of the operation (clamped to 100_000 when recorded)
 * @param latency end-to-end operation latency
 * @param maxItemCount requested max item count (only recorded when > 0)
 * @param actualItemCount item count actually returned
 * @param diagnosticsContext diagnostics for the operation; its CosmosDiagnostics entries drive request-level meters
 * @param contactedRegions regions contacted during the operation; may be null/empty
 */
public void recordOperation(
CosmosAsyncClient cosmosAsyncClient,
float requestCharge,
Duration latency,
int maxItemCount,
int actualItemCount,
CosmosDiagnosticsContext diagnosticsContext,
Set<String> contactedRegions) {
// Operation call counter.
CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_CALLS);
if (callsOptions.isEnabled()) {
Counter operationsCounter = Counter
.builder(callsOptions.getMeterName().toString())
.baseUnit("calls")
.description("Operation calls")
.tags(getEffectiveTags(operationTags, callsOptions))
.register(compositeRegistry);
operationsCounter.increment();
}
// RU charge distribution; values are clamped at 100_000 RU.
CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
if (requestChargeOptions.isEnabled()) {
DistributionSummary requestChargeMeter = DistributionSummary
.builder(requestChargeOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Operation RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(requestChargeOptions.getPercentiles())
.publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, requestChargeOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
// Optional detail meters: contacted-region count and item counts.
if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
if (regionsOptions.isEnabled()) {
DistributionSummary regionsContactedMeter = DistributionSummary
.builder(regionsOptions.getMeterName().toString())
.baseUnit("Regions contacted")
.description("Operation - regions contacted")
.maximumExpectedValue(100d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, regionsOptions))
.register(compositeRegistry);
if (contactedRegions != null && contactedRegions.size() > 0) {
regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
}
}
this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
}
// End-to-end operation latency.
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_LATENCY);
if (latencyOptions.isEnabled()) {
Timer latencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Operation latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, latencyOptions))
.register(compositeRegistry);
latencyMeter.record(latency);
}
// Derive request-level meters from each CosmosDiagnostics attached to the operation:
// direct (RNTBD) responses, gateway calls, address resolutions and — for feed/query
// responses — the query-plan call.
for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getResponseStatisticsList());
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getSupplementalResponseStatisticsList());
recordGatewayStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getDuration(),
requestStatistics.getGatewayStatisticsList());
recordAddressResolutionStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getAddressResolutionStatistics());
}
}
FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
.getFeedResponseDiagnostics(diagnostics);
if (feedDiagnostics == null) {
// Not a feed/query response — no query plan to record.
continue;
}
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
feedDiagnostics.getQueryPlanDiagnosticsContext();
recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
}
}
/**
 * Records the gateway meters (request counter, latency, request timeline) for a
 * query-plan retrieval. No-op when there are no query-plan diagnostics or the
 * RequestSummary category is disabled.
 *
 * @param ctx diagnostics context, used for threshold-based filtering
 * @param cosmosAsyncClient client owning the meter configuration
 * @param queryPlanDiagnostics query-plan diagnostics; may be null
 */
private void recordQueryPlanDiagnostics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient cosmosAsyncClient,
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
// Query-plan calls are tagged with the fixed DocumentCollection/QueryPlan operation type.
Tags requestTags = operationTags.and(
createQueryPlanTags(metricTagNames)
);
CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (requestsOptions.isEnabled() &&
(!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(requestsOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, requestsOptions))
.register(compositeRegistry);
requestCounter.increment();
}
Duration latency = queryPlanDiagnostics.getDuration();
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
recordRequestTimeline(
ctx,
cosmosAsyncClient,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Records request and response payload sizes (bytes) for a single request.
 * NOTE(review): both meters are tagged with operation-level tags only (no per-request
 * tags) — confirm this is intended rather than an oversight.
 *
 * @param ctx diagnostics context, used for threshold-based filtering
 * @param client client owning the meter configuration
 * @param requestPayloadSizeInBytes request body size
 * @param responsePayloadSizeInBytes response body size
 */
private void recordRequestPayloadSizes(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
int requestPayloadSizeInBytes,
int responsePayloadSizeInBytes
) {
CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
if (reqSizeOptions.isEnabled() &&
(!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary requestPayloadSizeMeter = DistributionSummary
.builder(reqSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Request payload size in bytes")
// 16 KB — documents above this size are possible but rare; keeps histogram bounded.
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, reqSizeOptions))
.register(compositeRegistry);
requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
}
CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
if (rspSizeOptions.isEnabled() &&
(!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary responsePayloadSizeMeter = DistributionSummary
.builder(rspSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Response payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, rspSizeOptions))
.register(compositeRegistry);
responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
}
}
/**
 * Records the requested max item count and the actual returned item count for
 * point/feed operations (OperationDetails category only).
 * NOTE(review): the {@code maxItemCount > 0} guard gates BOTH meters, so the actual
 * item count is also skipped when no max item count was requested — confirm intended.
 *
 * @param client client owning the meter configuration
 * @param maxItemCount requested page size; meters only recorded when > 0
 * @param actualItemCount number of items actually returned
 */
private void recordItemCounts(
CosmosAsyncClient client,
int maxItemCount,
int actualItemCount
) {
if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
if (maxItemCountOptions.isEnabled()) {
DistributionSummary maxItemCountMeter = DistributionSummary
.builder(maxItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Request max. item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, maxItemCountOptions))
.register(compositeRegistry);
// Clamp into [0, 100_000] before recording.
maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
}
CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
if (actualItemCountOptions.isEnabled()) {
DistributionSummary actualItemCountMeter = DistributionSummary
.builder(actualItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Response actual item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, actualItemCountOptions))
.register(compositeRegistry);
actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
}
}
}
/**
 * Builds the request-level tag set for a single store/gateway request, emitting only
 * the tags whose TagName is enabled. Missing values are rendered as "NONE".
 *
 * @param metricTagNames tag names enabled for this meter
 * @param pkRangeId partition key range id; may be null/blank
 * @param statusCode HTTP status code of the response
 * @param subStatusCode Cosmos sub-status code of the response
 * @param resourceType resource type of the request
 * @param operationType operation type of the request
 * @param regionName region that served the request; may be null
 * @param serviceEndpoint service endpoint authority; may be null
 * @param serviceAddress physical replica path; may be null
 */
private Tags createRequestTags(
EnumSet<TagName> metricTagNames,
String pkRangeId,
int statusCode,
int subStatusCode,
String resourceType,
String operationType,
String regionName,
String serviceEndpoint,
String serviceAddress
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
effectiveTags.add(Tag.of(
TagName.PartitionKeyRangeId.toString(),
Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
}
if (metricTagNames.contains(TagName.RequestStatusCode)) {
// Status and sub-status are combined into a single "status/subStatus" tag value.
effectiveTags.add(Tag.of(
TagName.RequestStatusCode.toString(),
statusCode + "/" + subStatusCode));
}
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(Tag.of(
TagName.RequestOperationType.toString(),
resourceType + "/" + operationType));
}
if (metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
regionName != null ? regionName : "NONE"));
}
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
if (metricTagNames.contains(TagName.ServiceAddress)) {
effectiveTags.add(Tag.of(
TagName.ServiceAddress.toString(),
effectiveServiceAddress));
}
// Partition id and replica id are both parsed out of the (escaped) service address,
// so the parse is done once when either tag is enabled.
boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
if (containsPartitionId || containsReplicaId) {
String partitionId = "NONE";
String replicaId = "NONE";
String[] partitionAndReplicaId =
StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
if (partitionAndReplicaId.length == 2) {
partitionId = partitionAndReplicaId[0];
replicaId = partitionAndReplicaId[1];
}
if (containsPartitionId) {
effectiveTags.add(Tag.of(
TagName.PartitionId.toString(),
partitionId));
}
if (containsReplicaId) {
effectiveTags.add(Tag.of(
TagName.ReplicaId.toString(),
replicaId));
}
}
return Tags.of(effectiveTags);
}
/**
 * Builds the request-level tags for query-plan (gateway) calls: only the fixed
 * DocumentCollection/QueryPlan operation-type tag, and only when that tag name is enabled.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        return Tags.of(QUERYPLAN_TAG);
    }
    return Tags.empty();
}
/**
 * Builds the tag set for address-resolution meters (target endpoint plus the two
 * refresh flags), honoring only the tag names enabled in the meter configuration.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> addressResolutionTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        addressResolutionTags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        addressResolutionTags.add(
            Tag.of(TagName.IsForceRefresh.toString(), isForceRefresh ? "True" : "False"));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        addressResolutionTags.add(
            Tag.of(
                TagName.IsForceCollectionRoutingMapRefresh.toString(),
                isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }

    return Tags.of(addressResolutionTags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Records one Timer per request-timeline event (connection, transit, received, ...)
 * under "&lt;meterName&gt;.&lt;eventName&gt;". RequestDetails category only; subject to
 * diagnostic-threshold filtering.
 *
 * @param ctx diagnostics context, used for threshold-based filtering
 * @param client client owning the meter configuration
 * @param name base meter name (direct or gateway timeline)
 * @param requestTimeline timeline of the request; may be null
 * @param requestTags request-level tags to attach to the meters
 */
private void recordRequestTimeline(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
CosmosMetricName name,
RequestTimeline requestTimeline,
Tags requestTags) {
if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
return;
}
CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
client,
name);
if (!timelineOptions.isEnabled() ||
(timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
return;
}
for (RequestTimeline.Event event : requestTimeline) {
Duration duration = event.getDuration();
// NOTE(review): '== Duration.ZERO' is a reference comparison — it only skips events
// initialized with the shared ZERO constant, not computed zero durations; confirm intended.
if (duration == null || duration == Duration.ZERO) {
continue;
}
Timer eventMeter = Timer
.builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
.description("Request timeline (" + event.getName() + ")")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(timelineOptions.getPercentiles())
.publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, timelineOptions))
.register(compositeRegistry);
eventMeter.record(duration);
}
}
/**
 * Records the per-request meters for direct (RNTBD) store responses: backend latency,
 * RU charge, request latency, request counter, request timeline, payload sizes and
 * endpoint statistics. RequestSummary category only.
 *
 * @param ctx diagnostics context, used for threshold-based filtering
 * @param client client owning the meter configuration
 * @param storeResponseStatistics store response statistics of a single client request
 */
private void recordStoreResponseStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
// Request-level tags: pkRange, status/substatus, resource/operation type, region,
// endpoint and replica address of this particular store response.
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNames,
storeResponseDiagnostics.getPartitionKeyRangeId(),
storeResponseDiagnostics.getStatusCode(),
storeResponseDiagnostics.getSubStatusCode(),
responseStatistics.getRequestResourceType().toString(),
responseStatistics.getRequestOperationType().toString(),
responseStatistics.getRegionName(),
storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
);
// Latency reported by the backend itself (if present in the response headers).
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
if (beLatencyOptions.isEnabled() &&
(!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary backendRequestLatencyMeter = DistributionSummary
.builder(beLatencyOptions.getMeterName().toString())
.baseUnit("ms")
.description("Backend service latency")
.maximumExpectedValue(6_000d)
.publishPercentiles(beLatencyOptions.getPercentiles())
.publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, beLatencyOptions))
.register(compositeRegistry);
backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
}
}
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = storeResponseDiagnostics.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("RNTBD Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
// Clamp to the expected maximum to keep the histogram bounded.
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Duration latency = responseStatistics.getDuration();
if (latency != null) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("RNTBD Request latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("RNTBD requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
storeResponseDiagnostics.getRequestTimeline(), requestTags);
}
recordRequestPayloadSizes(
ctx,
client,
storeResponseDiagnostics.getRequestPayloadLength(),
storeResponseDiagnostics.getResponsePayloadLength()
);
recordRntbdEndpointStatistics(
client,
storeResponseDiagnostics.getRntbdEndpointStatistics(),
requestTags);
}
}
/**
 * Records the per-request meters (request counter, RU charge, latency, request
 * timeline) for gateway-mode requests. RequestSummary category only.
 *
 * @param ctx diagnostics context, used for threshold-based filtering
 * @param client client owning the meter configuration
 * @param latency duration of the client request; shared across all gateway stats of it
 * @param gatewayStatisticsList gateway statistics; may be null/empty
 */
private void recordGatewayStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Duration latency,
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
if (gatewayStatisticsList == null
|| gatewayStatisticsList.size() == 0
|| !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
// Gateway requests have no replica/endpoint/region identity — drop those tag names
// so the gateway meters aren't split by tags that would always be "NONE".
EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
metricTagNamesForGateway.remove(TagName.RegionName);
metricTagNamesForGateway.remove(TagName.ServiceAddress);
metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
metricTagNamesForGateway.remove(TagName.PartitionId);
metricTagNamesForGateway.remove(TagName.ReplicaId);
for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNamesForGateway,
gatewayStats.getPartitionKeyRangeId(),
gatewayStats.getStatusCode(),
gatewayStats.getSubStatusCode(),
gatewayStats.getResourceType().toString(),
gatewayStats.getOperationType().toString(),
null,
null,
null)
);
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = gatewayStats.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Gateway Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
// Clamp to the expected maximum to keep the histogram bounded.
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
gatewayStats.getRequestTimeline(), requestTags);
}
}
/**
 * Records latency and call count for completed address-resolution calls.
 * AddressResolutions category only; inflight/incomplete entries are skipped.
 *
 * @param ctx diagnostics context, used for threshold-based filtering
 * @param client client owning the meter configuration
 * @param addressResolutionStatisticsMap resolution statistics keyed by activity; may be null/empty
 */
private void recordAddressResolutionStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {
if (addressResolutionStatisticsMap == null
|| addressResolutionStatisticsMap.size() == 0
|| !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
return;
}
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
: addressResolutionStatisticsMap.values()) {
// Only completed resolutions have a meaningful latency.
if (addressResolutionStatistics.isInflightRequest() ||
addressResolutionStatistics.getEndTimeUTC() == null) {
continue;
}
Tags addressResolutionTags = operationTags.and(
createAddressResolutionTags(
metricTagNames,
addressResolutionStatistics.getTargetEndpoint(),
addressResolutionStatistics.isForceRefresh(),
addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
)
);
Duration latency = Duration.between(
addressResolutionStatistics.getStartTimeUTC(),
addressResolutionStatistics.getEndTimeUTC());
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer addressResolutionLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Address resolution latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(addressResolutionTags, latencyOptions))
.register(compositeRegistry);
addressResolutionLatencyMeter.record(latency);
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Address resolution requests")
.tags(getEffectiveTags(addressResolutionTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
}
}
}
/**
 * Micrometer-based recorder for RNTBD (direct transport) metrics of a single endpoint.
 * The constructor registers the long-lived gauges/function counters (concurrent and
 * queued requests, endpoint counts, channel counts); {@link #markComplete} records the
 * per-request timers and payload-size distributions when a request finishes.
 *
 * Improvements over the original: added the missing {@code @Override} on the
 * {@code RntbdMetricsCompletionRecorder} interface method and made field access
 * consistently {@code this.}-qualified. Behavior is unchanged.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    private final Tags tags;
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        // Per-endpoint request gauges.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                     .description("RNTBD concurrent requests (executing or queued request count)")
                     .tags(getEffectiveTags(this.tags, options))
                     .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                     .description("RNTBD queued request count")
                     .tags(getEffectiveTags(this.tags, options))
                     .register(registry);
            }
        }

        // Client-wide endpoint gauges/counters (not tagged per endpoint).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                     .description("RNTBD endpoint count")
                     .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }

        // Per-endpoint channel counters/gauges; backed by the durable endpoint metrics
        // so the values survive endpoint recreation.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(this.tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(this.tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(this.tags, options))
                    .register(registry);
            }
        }
    }

    /**
     * Stops the request record's timers and records payload sizes. When the
     * DirectRequests category is disabled only the record's own stopwatch is stopped.
     */
    @Override
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }
            // Record into the overall timer plus either the failed or the success timer.
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(this.tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(this.registry);
                requestSize.record(requestRecord.requestLength());
            }
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(this.tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(this.registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            requestRecord.stop();
        }
    }
}
// Immutable cache entry for the composite-registry descendant scan: holds the scan
// outcome together with the instant until which that outcome may be reused.
static class DescendantValidationResult {
    private final Instant expiration;
    private final boolean result;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.expiration = expiration;
        this.result = result;
    }

    // Point in time after which the cached result must be recomputed.
    public Instant getExpiration() {
        return expiration;
    }

    // Cached outcome of the descendant-registry validation.
    public boolean getResult() {
        return result;
    }
}
} |
See above — a CompositeMeterRegistry (CMR) can itself contain another CMR, so the descendant scan must recurse; the recursion depth is capped (it bails out once depth exceeds 100, and the cap could be reduced further — even 5 or 10 should be sufficient). | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | hasAnyActualMeterRegistryCore(compositeRegistry, 1) | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
private static final Tag QUERYPLAN_TAG = Tag.of(
TagName.RequestOperationType.toString(),
ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders the full stack trace of {@code throwable} into a String.
 *
 * StringWriter#close() declares IOException (even though it is documented to
 * have no effect), so try-with-resources forces the catch clause; any such
 * exception is rethrown as an IllegalStateException.
 */
private static String convertStackTraceToString(Throwable throwable) {
    try (StringWriter stackTraceWriter = new StringWriter();
         PrintWriter printWriter = new PrintWriter(stackTraceWriter)) {
        throwable.printStackTrace(printWriter);
        return stackTraceWriter.toString();
    } catch (IOException unexpected) {
        throw new IllegalStateException(unexpected);
    }
}
/**
 * Creates a new, empty CompositeMeterRegistry. When trace logging is enabled,
 * every meter registration is logged together with the call stack that
 * triggered it (a dummy exception is created purely to capture the stack).
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();
    if (!logger.isTraceEnabled()) {
        return freshRegistry;
    }

    freshRegistry.config().onMeterAdded(meter -> logger.trace(
        "Meter '{}' added. Callstack: {}",
        meter.getId().getName(),
        convertStackTraceToString(new IllegalStateException("Dummy"))));

    return freshRegistry;
}
/**
 * Records CPU and memory system-usage samples into the composite registry.
 *
 * No-op while no registry is attached or while the CPU/memory meter options
 * have not been initialized yet (both are set up in add(...)).
 *
 * @param averageSystemCpuUsage avg. system CPU load as a percentage
 * @param freeMemoryAvailableInMB free memory sample in MB
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary cpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        cpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        DistributionSummary freeMemoryMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records operation-level metrics for a completed operation, pulling every
 * dimension (status/sub-status codes, item counts, container/database names,
 * operation/resource type, consistency level, RU charge and duration)
 * straight from the supplied diagnostics context.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    final CosmosDiagnosticsContext ctx = diagnosticsContext;

    recordOperation(
        client,
        ctx,
        ctx.getStatusCode(),
        ctx.getSubStatusCode(),
        ctx.getMaxItemCount(),
        ctx.getActualItemCount(),
        ctx.getContainerName(),
        ctx.getDatabaseName(),
        ctx.getOperationType(),
        ctx.isPointOperation(),
        ctx.getResourceType(),
        ctx.getEffectiveConsistencyLevel(),
        ctx.getOperationId(),
        ctx.getTotalRequestCharge(),
        ctx.getDuration()
    );
}
/**
 * Depth-first check whether the given composite registry (transitively)
 * contains any non-composite registry. Recursion bails out at depth > 100,
 * conservatively answering true, to guard against pathological or cyclic
 * composite-in-composite nesting.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        // Too deep - assume a real registry exists rather than recursing further.
        return true;
    }

    for (MeterRegistry candidate : compositeMeterRegistry.getRegistries()) {
        if (!(candidate instanceof CompositeMeterRegistry)) {
            // Any non-composite child is an actual registry.
            return true;
        }
        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) candidate, depth + 1)) {
            return true;
        }
    }

    return false;
}
// Core operation-metric recording: bails out fast when no actual registry is
// attached or the client disables telemetry metrics, then builds the
// operation-level tag set and delegates to an OperationMetricProducer.
// maxItemCount/actualItemCount of null are normalized to -1 (meaning "unknown").
private static void recordOperation(
CosmosAsyncClient client,
CosmosDiagnosticsContext diagnosticsContext,
int statusCode,
int subStatusCode,
Integer maxItemCount,
Integer actualItemCount,
String containerId,
String databaseId,
String operationType,
boolean isPointOperation,
String resourceType,
ConsistencyLevel consistencyLevel,
String operationId,
float requestCharge,
Duration latency
) {
boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
return;
}
Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
String accountTagValue = clientAccessor.getAccountTagValue(client);
EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);
// Contacted regions are only collected when the OperationDetails category is on.
Set<String> contactedRegions = Collections.emptySet();
if (metricCategories.contains(MetricCategory.OperationDetails)) {
contactedRegions = diagnosticsContext.getContactedRegionNames();
}
Tags operationTags = createOperationTags(
metricTagNames,
statusCode,
subStatusCode,
containerId,
databaseId,
operationType,
resourceType,
consistencyLevel,
operationId,
isPointOperation,
contactedRegions,
clientCorrelationTag,
accountTagValue
);
OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
// null item counts are passed down as -1 sentinels.
metricProducer.recordOperation(
client,
requestCharge,
latency,
maxItemCount == null ? -1 : maxItemCount,
actualItemCount == null ? -1: actualItemCount,
diagnosticsContext,
contactedRegions
);
}
// Creates a completion recorder that publishes RNTBD transport metrics for the
// given endpoint into the shared composite registry.
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
RntbdTransportClient client,
RntbdEndpoint endpoint) {
return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Attaches a MeterRegistry to the shared composite. Registries are
 * reference-counted: only the very first add of a given registry wires it in
 * and captures the CPU/memory meter options; subsequent adds just bump the
 * count. The cached descendant-validation result is invalidated so the next
 * metric record re-evaluates registry availability.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {
    AtomicLong refCount = registryRefCount.computeIfAbsent(registry, ignored -> new AtomicLong(0));
    if (refCount.incrementAndGet() != 1L) {
        return;
    }

    ClientTelemetryMetrics.compositeRegistry.add(registry);
    ClientTelemetryMetrics.cpuOptions = cpuOptions;
    ClientTelemetryMetrics.memoryOptions = memoryOptions;

    // Force re-validation of whether any actual registry is attached.
    lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
// Detaches a registry previously attached via add(...). Only when its
// ref-count drops to zero is the registry cleared, closed and removed from the
// composite; when the composite then becomes empty it is replaced with a fresh
// one so stale meters do not linger. The cached descendant-validation result
// is invalidated in that case.
// NOTE(review): registryRefCount.get(registry) throws NPE when remove() is
// called for a registry that was never add()-ed - confirm callers always
// balance add/remove.
public static synchronized void remove(MeterRegistry registry) {
if (registryRefCount
.get(registry)
.decrementAndGet() == 0L) {
registry.clear();
registry.close();
ClientTelemetryMetrics
.compositeRegistry
.remove(registry);
if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
}
lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
}
// Percent-escapes a value for safe use in metric tag values; '_', '-', '/' and
// '.' are left unescaped (see PERCENT_ESCAPER).
public static String escape(String value) {
return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set, adding only tags whose TagName is
 * enabled in {@code metricTagNames}. The container tag has the shape
 * {@code account/database/container} with "NONE" substituted for null parts;
 * the region tag joins all contacted region names with ", ".
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }
    if (metricTagNames.contains(TagName.Container)) {
        // BUGFIX: the ternaries must be parenthesized. '+' binds tighter than
        // '?:', so the previous code evaluated
        // (escape(accountTagValue) + "/" + databaseId) != null - always true -
        // yielding just escape(databaseId) (NPE for null databaseId) and
        // silently dropping the account and container parts of the tag value.
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }
    if (metricTagNames.contains(TagName.Operation)) {
        // Non-point operations with an operation id get it appended as a third segment.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }
    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }
    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }
    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }
    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }
    return Tags.of(effectiveTags);
}
/**
 * Filters out tags whose key matches a TagName suppressed in the meter
 * options; returns the input unchanged when nothing is suppressed.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTagNames = meterOptions.getSuppressedTagNames();
    if (suppressedTagNames == null || suppressedTagNames.isEmpty()) {
        return tags;
    }

    // Collect suppressed keys once, then retain everything not suppressed.
    HashSet<String> suppressedKeys = new HashSet<>();
    for (TagName suppressedTagName : suppressedTagNames) {
        suppressedKeys.add(suppressedTagName.name());
    }

    List<Tag> retainedTags = new ArrayList<>();
    for (Tag candidate : tags) {
        if (!suppressedKeys.contains(candidate.getKey())) {
            retainedTags.add(candidate);
        }
    }
    return Tags.of(retainedTags);
}
private static class OperationMetricProducer {
private final EnumSet<TagName> metricTagNames;
private final EnumSet<MetricCategory> metricCategories;
private final Tags operationTags;
// Captures the per-operation tag set plus the enabled metric categories and
// tag names used by all record* methods of this producer.
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
this.metricCategories = metricCategories;
this.metricTagNames = metricTagNames;
this.operationTags = operationTags;
}
// Publishes all operation-scoped meters (call count, RU charge, latency,
// optional operation details) and then walks every CosmosDiagnostics in the
// context to publish per-request metrics (store/gateway/address-resolution
// statistics and query-plan diagnostics).
public void recordOperation(
CosmosAsyncClient cosmosAsyncClient,
float requestCharge,
Duration latency,
int maxItemCount,
int actualItemCount,
CosmosDiagnosticsContext diagnosticsContext,
Set<String> contactedRegions) {
// Operation call counter.
CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_CALLS);
if (callsOptions.isEnabled()) {
Counter operationsCounter = Counter
.builder(callsOptions.getMeterName().toString())
.baseUnit("calls")
.description("Operation calls")
.tags(getEffectiveTags(operationTags, callsOptions))
.register(compositeRegistry);
operationsCounter.increment();
}
// RU charge distribution - recorded value is clamped to 100,000 RU.
CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
if (requestChargeOptions.isEnabled()) {
DistributionSummary requestChargeMeter = DistributionSummary
.builder(requestChargeOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Operation RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(requestChargeOptions.getPercentiles())
.publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, requestChargeOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
// Optional detail meters: regions contacted and item counts.
if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
if (regionsOptions.isEnabled()) {
DistributionSummary regionsContactedMeter = DistributionSummary
.builder(regionsOptions.getMeterName().toString())
.baseUnit("Regions contacted")
.description("Operation - regions contacted")
.maximumExpectedValue(100d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, regionsOptions))
.register(compositeRegistry);
if (contactedRegions != null && contactedRegions.size() > 0) {
regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
}
}
this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
}
// End-to-end operation latency timer.
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_LATENCY);
if (latencyOptions.isEnabled()) {
Timer latencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Operation latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, latencyOptions))
.register(compositeRegistry);
latencyMeter.record(latency);
}
// Per-request metrics from every diagnostics instance in the context.
for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getResponseStatisticsList());
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getSupplementalResponseStatisticsList());
recordGatewayStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getDuration(),
requestStatistics.getGatewayStatisticsList());
recordAddressResolutionStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getAddressResolutionStatistics());
}
}
// Feed responses may additionally carry query-plan diagnostics.
FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
.getFeedResponseDiagnostics(diagnostics);
if (feedDiagnostics == null) {
continue;
}
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
feedDiagnostics.getQueryPlanDiagnosticsContext();
recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
}
}
// Publishes gateway request metrics for a query-plan call (request counter,
// optional latency timer, request timeline). No-op when there are no
// query-plan diagnostics or the RequestSummary category is disabled.
private void recordQueryPlanDiagnostics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient cosmosAsyncClient,
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
Tags requestTags = operationTags.and(
createQueryPlanTags(metricTagNames)
);
CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
// Meters may additionally be filtered to threshold-violating operations.
if (requestsOptions.isEnabled() &&
(!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(requestsOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, requestsOptions))
.register(compositeRegistry);
requestCounter.increment();
}
Duration latency = queryPlanDiagnostics.getDuration();
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
recordRequestTimeline(
ctx,
cosmosAsyncClient,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
// Publishes request/response payload-size distributions (in bytes) for a
// single request; both meters expect values up to 16 KB and are optionally
// restricted to threshold-violating operations.
private void recordRequestPayloadSizes(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
int requestPayloadSizeInBytes,
int responsePayloadSizeInBytes
) {
CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
if (reqSizeOptions.isEnabled() &&
(!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary requestPayloadSizeMeter = DistributionSummary
.builder(reqSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Request payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, reqSizeOptions))
.register(compositeRegistry);
requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
}
CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
if (rspSizeOptions.isEnabled() &&
(!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary responsePayloadSizeMeter = DistributionSummary
.builder(rspSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Response payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, rspSizeOptions))
.register(compositeRegistry);
responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
}
}
// Publishes max/actual item-count distributions for feed operations. Skipped
// entirely when maxItemCount <= 0 (e.g. point operations, where -1 was passed)
// or when the OperationDetails category is disabled; recorded values are
// clamped to [0, 100,000].
private void recordItemCounts(
CosmosAsyncClient client,
int maxItemCount,
int actualItemCount
) {
if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
if (maxItemCountOptions.isEnabled()) {
DistributionSummary maxItemCountMeter = DistributionSummary
.builder(maxItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Request max. item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, maxItemCountOptions))
.register(compositeRegistry);
maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
}
CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
if (actualItemCountOptions.isEnabled()) {
DistributionSummary actualItemCountMeter = DistributionSummary
.builder(actualItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Response actual item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, actualItemCountOptions))
.register(compositeRegistry);
actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
}
}
}
// Builds the request-level tag set (partition key range, status codes,
// operation type, region, service endpoint/address, partition/replica id),
// adding only tags whose TagName is enabled. Null or blank inputs map to the
// literal "NONE". Partition/replica ids are parsed out of the escaped service
// address when either of those tags is requested.
private Tags createRequestTags(
EnumSet<TagName> metricTagNames,
String pkRangeId,
int statusCode,
int subStatusCode,
String resourceType,
String operationType,
String regionName,
String serviceEndpoint,
String serviceAddress
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
effectiveTags.add(Tag.of(
TagName.PartitionKeyRangeId.toString(),
Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
}
if (metricTagNames.contains(TagName.RequestStatusCode)) {
effectiveTags.add(Tag.of(
TagName.RequestStatusCode.toString(),
statusCode + "/" + subStatusCode));
}
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(Tag.of(
TagName.RequestOperationType.toString(),
resourceType + "/" + operationType));
}
if (metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
regionName != null ? regionName : "NONE"));
}
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
if (metricTagNames.contains(TagName.ServiceAddress)) {
effectiveTags.add(Tag.of(
TagName.ServiceAddress.toString(),
effectiveServiceAddress));
}
boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
if (containsPartitionId || containsReplicaId) {
// Both ids default to "NONE" when the address cannot be parsed.
String partitionId = "NONE";
String replicaId = "NONE";
String[] partitionAndReplicaId =
StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
if (partitionAndReplicaId.length == 2) {
partitionId = partitionAndReplicaId[0];
replicaId = partitionAndReplicaId[1];
}
if (containsPartitionId) {
effectiveTags.add(Tag.of(
TagName.PartitionId.toString(),
partitionId));
}
if (containsReplicaId) {
effectiveTags.add(Tag.of(
TagName.ReplicaId.toString(),
replicaId));
}
}
return Tags.of(effectiveTags);
}
/**
 * Tag set for query-plan gateway requests: the only applicable tag is the
 * constant DocumentCollection/QueryPlan request-operation-type tag, added
 * when that TagName is enabled.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    List<Tag> queryPlanTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        queryPlanTags.add(QUERYPLAN_TAG);
    }
    return Tags.of(queryPlanTags);
}
/**
 * Tag set for address-resolution metrics: target endpoint plus the two
 * force-refresh flags, rendered as "True"/"False". Only tags whose TagName is
 * enabled are added; a null endpoint maps to "NONE".
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> addressResolutionTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        addressResolutionTags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        addressResolutionTags.add(Tag.of(
            TagName.IsForceRefresh.toString(),
            isForceRefresh ? "True" : "False"));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        addressResolutionTags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }

    return Tags.of(addressResolutionTags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Emits one latency timer per non-zero event in the request timeline (meter
 * name is suffixed with the escaped event name). No-op when there is no
 * timeline, the RequestDetails category is disabled, the meter is disabled,
 * or threshold filtering is on and the operation did not violate thresholds.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {
    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }
    CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
        client,
        name);
    if (!timelineOptions.isEnabled() ||
        (timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
        return;
    }
    for (RequestTimeline.Event event : requestTimeline) {
        Duration duration = event.getDuration();
        // BUGFIX: was 'duration == Duration.ZERO' - a reference comparison that
        // only matches the ZERO singleton; isZero() correctly skips every
        // zero-length duration.
        if (duration == null || duration.isZero()) {
            continue;
        }
        Timer eventMeter = Timer
            .builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
            .description("Request timeline (" + event.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(timelineOptions.getPercentiles())
            .publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, timelineOptions))
            .register(compositeRegistry);
        eventMeter.record(duration);
    }
}
// Publishes per-request direct-mode (RNTBD) metrics for every store response:
// backend latency, RU charge, request latency, request counter, optional
// request timeline, payload sizes and legacy endpoint statistics. No-op when
// the RequestSummary category is disabled.
private void recordStoreResponseStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
// Request-level tags layered on top of the operation tags.
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNames,
storeResponseDiagnostics.getPartitionKeyRangeId(),
storeResponseDiagnostics.getStatusCode(),
storeResponseDiagnostics.getSubStatusCode(),
responseStatistics.getRequestResourceType().toString(),
responseStatistics.getRequestOperationType().toString(),
responseStatistics.getRegionName(),
storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
);
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
if (beLatencyOptions.isEnabled() &&
(!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary backendRequestLatencyMeter = DistributionSummary
.builder(beLatencyOptions.getMeterName().toString())
.baseUnit("ms")
.description("Backend service latency")
.maximumExpectedValue(6_000d)
.publishPercentiles(beLatencyOptions.getPercentiles())
.publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, beLatencyOptions))
.register(compositeRegistry);
// NOTE(review): re-invokes getBackendLatencyInMs() instead of reusing the
// backendLatency local captured above - same value, minor redundancy.
backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
}
}
// RU charge per RNTBD request, clamped to 100,000 RU.
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = storeResponseDiagnostics.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("RNTBD Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Duration latency = responseStatistics.getDuration();
if (latency != null) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("RNTBD Request latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("RNTBD requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
storeResponseDiagnostics.getRequestTimeline(), requestTags);
}
recordRequestPayloadSizes(
ctx,
client,
storeResponseDiagnostics.getRequestPayloadLength(),
storeResponseDiagnostics.getResponsePayloadLength()
);
recordRntbdEndpointStatistics(
client,
storeResponseDiagnostics.getRntbdEndpointStatistics(),
requestTags);
}
}
// Publishes per-request gateway-mode metrics (request counter, RU charge,
// latency, request timeline). Region/address/partition/replica tags are
// stripped from the tag-name set because they do not apply to gateway calls.
// No-op when there are no gateway statistics or RequestSummary is disabled.
private void recordGatewayStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Duration latency,
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
if (gatewayStatisticsList == null
|| gatewayStatisticsList.size() == 0
|| !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
// Gateway requests carry no region/address/partition/replica information.
EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
metricTagNamesForGateway.remove(TagName.RegionName);
metricTagNamesForGateway.remove(TagName.ServiceAddress);
metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
metricTagNamesForGateway.remove(TagName.PartitionId);
metricTagNamesForGateway.remove(TagName.ReplicaId);
for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNamesForGateway,
gatewayStats.getPartitionKeyRangeId(),
gatewayStats.getStatusCode(),
gatewayStats.getSubStatusCode(),
gatewayStats.getResourceType().toString(),
gatewayStats.getOperationType().toString(),
null,
null,
null)
);
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
// RU charge per gateway request, clamped to 100,000 RU.
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = gatewayStats.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Gateway Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
// NOTE(review): the same caller-supplied latency is recorded for every entry
// in gatewayStatisticsList - confirm this is intentional.
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
gatewayStats.getRequestTimeline(), requestTags);
}
}
/**
 * Records latency and request-count meters for each completed Gatekeeper/address
 * resolution call captured in the request statistics.
 *
 * In-flight resolutions (or those without an end timestamp) are skipped because no
 * meaningful latency can be computed for them yet.
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {

    // Nothing to record when there is no data or the category is disabled.
    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.isEmpty()
        || !this.metricCategories.contains(MetricCategory.AddressResolutions)) {

        return;
    }

    for (ClientSideRequestStatistics.AddressResolutionStatistics stats
            : addressResolutionStatisticsMap.values()) {

        // Only completed resolutions carry a usable latency.
        if (stats.isInflightRequest() || stats.getEndTimeUTC() == null) {
            continue;
        }

        Tags resolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                stats.getTargetEndpoint(),
                stats.isForceRefresh(),
                stats.isForceCollectionRoutingMapRefresh()));

        Duration resolutionLatency = Duration.between(stats.getStartTimeUTC(), stats.getEndTimeUTC());

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        boolean latencyEnabled = latencyOptions.isEnabled()
            && (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
        if (latencyEnabled) {
            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(resolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(resolutionLatency);
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        boolean requestsEnabled = reqOptions.isEnabled()
            && (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
        if (requestsEnabled) {
            Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(resolutionTags, reqOptions))
                .register(compositeRegistry)
                .increment();
        }
    }
}
}
// Micrometer-based (v2) recorder for RNTBD (direct TCP transport) metrics. One instance
// is created per endpoint (see createRntbdMetrics). Endpoint/channel gauges and counters
// are registered once in the constructor; per-request meters are resolved on every
// markComplete call - Micrometer returns the already-registered meter for an existing
// id, so the repeated builder calls are lookups, not duplicate registrations.
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    // Endpoint-level tag(s) applied to (almost) every meter below.
    private final Tags tags;
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        // Request-level gauges sampled directly from the endpoint instance.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                    .description("RNTBD concurrent requests (executing or queued request count)")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                    .description("RNTBD queued request count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }

        // Client-wide endpoint gauges/counters - note these are intentionally untagged
        // (no per-endpoint tags) because they aggregate across all endpoints.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                    .description("RNTBD endpoint count")
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }

        // Channel lifecycle meters, sampled from the endpoint's durable metrics so the
        // values survive endpoint re-creation.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
    }

    /**
     * Called by the transport layer when an RNTBD request record completes (successfully
     * or not). Stops the record's timing sample against the overall latency timer plus
     * the success- or failure-specific timer, and records request/response payload sizes.
     * When the DirectRequests category is disabled the record is still stopped so the
     * timing sample is released.
     */
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            // Timers stay null when the corresponding meter is disabled;
            // requestRecord.stop(...) is expected to tolerate null timers.
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            // Stop the stopwatch once, recording into the overall timer and into the
            // outcome-specific timer.
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                requestSize.record(requestRecord.requestLength());
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            // Category disabled - still stop the record so timing state is released.
            requestRecord.stop();
        }
    }
}
// Immutable cache entry for the "does the composite registry contain at least one
// concrete (non-composite) registry" probe: holds the cached answer plus the instant
// until which that answer remains valid.
static class DescendantValidationResult {
    private final Instant expiration;
    private final boolean result;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.expiration = expiration;
        this.result = result;
    }

    /** Returns the instant at which this cached validation result expires. */
    public Instant getExpiration() {
        return expiration;
    }

    /** Returns the cached validation outcome. */
    public boolean getResult() {
        return result;
    }
}
}
class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
// Bridge accessors used to reach package-private state of the public API types.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
    ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Escaper for metric tag values; '_', '-', '/' and '.' are treated as safe characters.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
// Process-wide composite registry all meters register against; replaced with a fresh
// instance once the last underlying registry is removed (see remove(...)).
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
// Reference count per underlying registry, maintained by add(...)/remove(...).
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
// Options for the system-wide CPU/memory meters; assigned on the first add(...).
// NOTE(review): non-volatile mutable statics - writes happen under the synchronized
// add/remove methods but reads in recordSystemUsage are unsynchronized; confirm the
// intended memory-visibility guarantees.
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
// Cached answer (plus expiration) of the "composite contains a concrete registry"
// probe; reset to a stale sentinel whenever registries are added or removed.
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
// Constant tag identifying gateway query-plan requests.
private static final Tag QUERYPLAN_TAG = Tag.of(
    TagName.RequestOperationType.toString(),
    ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders the complete stack trace of {@code throwable} into a single string.
 */
private static String convertStackTraceToString(Throwable throwable)
{
    StringWriter stackTraceBuffer = new StringWriter();
    // A PrintWriter over a StringWriter never performs real I/O and its close() is a
    // no-op, so no try-with-resources / IOException handling is needed here.
    throwable.printStackTrace(new PrintWriter(stackTraceBuffer));
    return stackTraceBuffer.toString();
}
/**
 * Creates a new, empty composite meter registry. When trace logging is enabled, every
 * meter registration is logged together with the call stack that triggered it, which
 * helps track down unexpected meter creation.
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();
    if (!logger.isTraceEnabled()) {
        return freshRegistry;
    }

    freshRegistry.config().onMeterAdded(
        (meter) -> logger.trace(
            "Meter '{}' added. Callstack: {}",
            meter.getId().getName(),
            convertStackTraceToString(new IllegalStateException("Dummy")))
    );

    return freshRegistry;
}
/**
 * Records the system-wide CPU load and free-memory samples. A no-op until at least one
 * concrete registry has been added and the CPU/memory meter options were configured
 * via {@link #add}.
 *
 * @param averageSystemCpuUsage   average system CPU load in percent
 * @param freeMemoryAvailableInMB free memory in MB
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    boolean notReady =
        compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null;
    if (notReady) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary cpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        cpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        DistributionSummary freeMemoryMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records operation-level client metrics for a completed operation. All dimensions
 * (status codes, item counts, container/database names, RU charge, latency, ...) are
 * extracted from the diagnostics context and forwarded to the core overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    recordOperation(
        client,
        diagnosticsContext,
        diagnosticsContext.getStatusCode(),
        diagnosticsContext.getSubStatusCode(),
        diagnosticsContext.getMaxItemCount(),
        diagnosticsContext.getActualItemCount(),
        diagnosticsContext.getContainerName(),
        diagnosticsContext.getDatabaseName(),
        diagnosticsContext.getOperationType(),
        diagnosticsContext.isPointOperation(),
        diagnosticsContext.getResourceType(),
        diagnosticsContext.getEffectiveConsistencyLevel(),
        diagnosticsContext.getOperationId(),
        diagnosticsContext.getTotalRequestCharge(),
        diagnosticsContext.getDuration()
    );
}
/**
 * Depth-first probe for at least one concrete (non-composite) registry nested anywhere
 * under the given composite registry. The recursion depth is capped; beyond the cap we
 * conservatively report {@code true} to avoid unbounded/cyclic traversal.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        return true;
    }

    for (MeterRegistry nested : compositeMeterRegistry.getRegistries()) {
        // Any non-composite registry is a concrete one - done.
        if (!(nested instanceof CompositeMeterRegistry)) {
            return true;
        }

        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) nested, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Core implementation for recording operation-level metrics. Builds the operation tag
 * set from the client's configured tag names and delegates the actual meter updates to
 * an {@link OperationMetricProducer}.
 *
 * Returns early when no concrete meter registry is attached or client telemetry
 * metrics are disabled, so the tag construction cost is only paid when needed.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // NOTE(review): enablement is derived from shouldEnableEmptyPageDiagnostics -
    // presumably that accessor also reflects the client-telemetry-metrics switch;
    // confirm against the accessor implementation.
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }

    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);

    // Contacted regions are only resolved when the OperationDetails category is on.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }

    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );

    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
    // Null item counts are mapped to -1 ("not applicable"), which the producer skips.
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1: actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
/**
 * Factory used by the RNTBD transport layer to obtain a completion recorder that
 * publishes per-endpoint request metrics into the process-wide composite registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {

    return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Registers a meter registry with the process-wide composite registry. Registration is
 * reference-counted: the registry is attached only on the first add; subsequent adds of
 * the same registry just bump the count. Pair each call with {@link #remove}.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    if (registryRefCount
        .computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0))
        .incrementAndGet() == 1L) {

        ClientTelemetryMetrics
            .compositeRegistry
            .add(registry);

        // NOTE(review): these options are shared statics - the most recently added
        // registry wins; confirm that is intended when multiple clients configure
        // different CPU/memory meter options.
        ClientTelemetryMetrics.cpuOptions = cpuOptions;
        ClientTelemetryMetrics.memoryOptions = memoryOptions;

        // Invalidate the cached "composite has a concrete registry" answer.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Decrements the reference count for a registry previously passed to {@link #add}.
 * Only when the count reaches zero is the registry cleared, closed and detached from
 * the composite registry.
 *
 * Fixes over the previous implementation:
 * - an unmatched remove (no prior add) threw a NullPointerException; it is now a no-op,
 * - the zeroed ref-count entry was never evicted, so the map grew without bound across
 *   repeated add/remove cycles.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null || refCount.decrementAndGet() != 0L) {
        return;
    }

    // Last reference gone - evict the counter and tear the registry down.
    registryRefCount.remove(registry);
    registry.clear();
    registry.close();
    ClientTelemetryMetrics
        .compositeRegistry
        .remove(registry);

    // When no registries remain, start over with a fresh composite so stale meters
    // cannot resurface on the next add.
    if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
        ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
    }

    // Invalidate the cached "composite has a concrete registry" answer.
    lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
/**
 * Percent-escapes {@code value} so it can safely be used as a metric tag value;
 * '_', '-', '/' and '.' are kept as-is (see {@code PERCENT_ESCAPER}).
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set; only dimensions enabled via
 * {@code metricTagNames} are emitted.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {

    List<Tag> effectiveTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }

    if (metricTagNames.contains(TagName.Container)) {
        // FIX: the ternaries must be parenthesized. '+' binds more tightly than '?:',
        // so the previous expression parsed as
        //   (escape(accountTagValue) + "/" + databaseId) != null ? escape(databaseId) : ...
        // which always took the true branch, dropped the account/container parts and
        // called escape(databaseId) even when databaseId was null (NPE).
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");

        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }

    if (metricTagNames.contains(TagName.Operation)) {
        // Non-point operations with an operation id (e.g. queries) include the id.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;

        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }

    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }

    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }

    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }

    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {

        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }

    return Tags.of(effectiveTags);
}
/**
 * Returns {@code tags} with every tag whose name is suppressed for the given meter
 * removed. When nothing is suppressed the input instance is returned unchanged.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    // Materialize the suppressed names once for O(1) membership checks below.
    HashSet<String> suppressedNames = new HashSet<>();
    for (TagName suppressed : suppressedTags) {
        suppressedNames.add(suppressed.name());
    }

    List<Tag> retainedTags = new ArrayList<>();
    for (Tag candidate : tags) {
        if (!suppressedNames.contains(candidate.getKey())) {
            retainedTags.add(candidate);
        }
    }

    return Tags.of(retainedTags);
}
private static class OperationMetricProducer {
// Tag dimensions the user enabled for metrics.
private final EnumSet<TagName> metricTagNames;
// Metric categories the user enabled (request summaries/details, legacy, ...).
private final EnumSet<MetricCategory> metricCategories;
// Pre-computed operation-level tags shared by every meter this producer emits.
private final Tags operationTags;

/**
 * Captures the per-operation metric configuration and pre-computed operation tags.
 */
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
    this.metricCategories = metricCategories;
    this.metricTagNames = metricTagNames;
    this.operationTags = operationTags;
}
/**
 * Records all meters for one completed operation: call count, RU charge, latency,
 * optional operation details (regions contacted, item counts) and - per diagnostics
 * instance - the request-level store/gateway/address-resolution/query-plan metrics.
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {

    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }

    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        // Clamp to the summary's configured maximum.
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }

    // Optional detail meters - only when the OperationDetails category is enabled.
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }

        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }

    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }

    // Fan out into request-level metrics for every diagnostics instance captured for
    // this operation (retries/cross-region attempts each contribute their own stats).
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }

        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);

        if (feedDiagnostics == null) {
            continue;
        }

        // Query/feed operations may additionally carry query-plan diagnostics.
        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();
        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Records gateway request-count/latency meters (plus the request timeline) for a
 * query-plan retrieval request, tagged with the query-plan operation tag.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );

    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    // Honor optional threshold-based filtering: record only when filtering is off or
    // the diagnostics thresholds were violated.
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }

    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }

    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Records the request and response payload sizes (bytes) of a single request,
 * honoring optional diagnostic-threshold filtering per meter.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
    if (reqSizeOptions.isEnabled() &&
        (!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        DistributionSummary requestPayloadSizeMeter = DistributionSummary
            .builder(reqSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Request payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, reqSizeOptions))
            .register(compositeRegistry);
        requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
    }

    CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
    if (rspSizeOptions.isEnabled() &&
        (!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

        DistributionSummary responsePayloadSizeMeter = DistributionSummary
            .builder(rspSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Response payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, rspSizeOptions))
            .register(compositeRegistry);
        responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
    }
}
/**
 * Records the requested max item count and the actual item count of a feed/query
 * response. Skipped entirely when maxItemCount is not applicable (&lt;= 0, e.g. point
 * operations) or the OperationDetails category is disabled.
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
        if (maxItemCountOptions.isEnabled()) {
            DistributionSummary maxItemCountMeter = DistributionSummary
                .builder(maxItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Request max. item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, maxItemCountOptions))
                .register(compositeRegistry);
            // Clamp into [0, 100000] to match the configured maximum.
            maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
        }

        CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
        if (actualItemCountOptions.isEnabled()) {
            DistributionSummary actualItemCountMeter = DistributionSummary
                .builder(actualItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Response actual item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, actualItemCountOptions))
                .register(compositeRegistry);
            actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
        }
    }
}
/**
 * Builds the request-level tag set (partition key range, status codes, operation type,
 * region, service endpoint/address, partition/replica ids); only dimensions enabled in
 * {@code metricTagNames} are emitted. Missing values are reported as "NONE".
 */
private Tags createRequestTags(
    EnumSet<TagName> metricTagNames,
    String pkRangeId,
    int statusCode,
    int subStatusCode,
    String resourceType,
    String operationType,
    String regionName,
    String serviceEndpoint,
    String serviceAddress
) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
        effectiveTags.add(Tag.of(
            TagName.PartitionKeyRangeId.toString(),
            Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
    }

    if (metricTagNames.contains(TagName.RequestStatusCode)) {
        effectiveTags.add(Tag.of(
            TagName.RequestStatusCode.toString(),
            statusCode + "/" + subStatusCode));
    }

    if (metricTagNames.contains(TagName.RequestOperationType)) {
        effectiveTags.add(Tag.of(
            TagName.RequestOperationType.toString(),
            resourceType + "/" + operationType));
    }

    if (metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            regionName != null ? regionName : "NONE"));
    }

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceEndpoint.toString(),
            serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
    }

    String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
    if (metricTagNames.contains(TagName.ServiceAddress)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceAddress.toString(),
            effectiveServiceAddress));
    }

    // Partition/replica ids are both parsed from the (escaped) service address; parsing
    // is done once and reused for whichever of the two tags is enabled.
    boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
    boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
    if (containsPartitionId || containsReplicaId) {
        String partitionId = "NONE";
        String replicaId = "NONE";
        String[] partitionAndReplicaId =
            StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
        if (partitionAndReplicaId.length == 2) {
            partitionId = partitionAndReplicaId[0];
            replicaId = partitionAndReplicaId[1];
        }

        if (containsPartitionId) {
            effectiveTags.add(Tag.of(
                TagName.PartitionId.toString(),
                partitionId));
        }

        if (containsReplicaId) {
            effectiveTags.add(Tag.of(
                TagName.ReplicaId.toString(),
                replicaId));
        }
    }

    return Tags.of(effectiveTags);
}
/**
 * Builds the extra tag set used for query-plan gateway requests; the constant
 * query-plan tag is only emitted when the RequestOperationType dimension is enabled.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    List<Tag> queryPlanTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.RequestOperationType)) {
        queryPlanTags.add(QUERYPLAN_TAG);
    }

    return Tags.of(queryPlanTags);
}
/**
 * Builds the tag set for address-resolution meters (target endpoint plus the two
 * force-refresh flags); only dimensions enabled in {@code metricTagNames} are emitted.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> resolutionTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        resolutionTags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        String forceRefreshValue = isForceRefresh ? "True" : "False";
        resolutionTags.add(Tag.of(TagName.IsForceRefresh.toString(), forceRefreshValue));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        String forceMapRefreshValue = isForceCollectionRoutingMapRefresh ? "True" : "False";
        resolutionTags.add(Tag.of(TagName.IsForceCollectionRoutingMapRefresh.toString(), forceMapRefreshValue));
    }

    return Tags.of(resolutionTags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
// Emits one latency Timer per populated event of a request timeline (one meter per
// timeline phase, suffixed with the escaped event name).
// No-op unless the RequestDetails metric category is active, the meter is enabled and
// (when threshold filtering is configured) the operation violated its diagnostic thresholds.
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {

    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }

    CosmosMeterOptions options = clientAccessor.getMeterOptions(client, name);
    boolean suppressedByThresholds =
        options.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated();
    if (!options.isEnabled() || suppressedByThresholds) {
        return;
    }

    for (RequestTimeline.Event timelineEvent : requestTimeline) {
        Duration elapsed = timelineEvent.getDuration();
        if (elapsed == null || elapsed == Duration.ZERO) {
            continue; // nothing was measured for this phase
        }

        Timer phaseTimer = Timer
            .builder(options.getMeterName().toString() + "." + escape(timelineEvent.getName()))
            .description("Request timeline (" + timelineEvent.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(options.getPercentiles())
            .publishPercentileHistogram(options.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, options))
            .register(compositeRegistry);
        phaseTimer.record(elapsed);
    }
}
// Emits per-request direct-mode (RNTBD) metrics for each store response in the batch:
// backend latency, RU charge, end-to-end latency and request count - plus, when the
// corresponding categories/meters are active, the request timeline, payload sizes and
// endpoint statistics. No-op unless the RequestSummary metric category is enabled.
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {

    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();

        // Request-level tags (partition, status/sub-status, resource+operation type, region,
        // replica address) layered on top of the shared operation-level tags.
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );

        // Backend latency is only present when the request actually reached the backend.
        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
            }
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamped to the summary's maximum expected value.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        // Optional detail-level meters for this request.
        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }

        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );

        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
// Emits gateway-mode per-request metrics (request count, RU charge, latency, timeline).
// No-op when there are no gateway statistics or the RequestSummary category is disabled.
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {

    if (gatewayStatisticsList == null
        || gatewayStatisticsList.size() == 0
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Direct-mode-only tag dimensions don't apply to gateway requests - strip them
    // so gateway meters don't emit empty/meaningless tag values.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);

    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,   // region      - not applicable in gateway mode
                null,   // authority   - not applicable in gateway mode
                null)   // replica path - not applicable in gateway mode
        );

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamped to the summary's maximum expected value.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        // NOTE: the caller-supplied latency covers the whole request-statistics scope and is
        // recorded once per gateway statistics entry.
        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
// Emits address-resolution latency and request-count meters for each completed
// resolution. In-flight resolutions (or ones without an end timestamp) are skipped
// because their duration cannot be computed yet.
// No-op unless the AddressResolutions metric category is enabled.
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {

    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.size() == 0
        || !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
        return;
    }

    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
            : addressResolutionStatisticsMap.values()) {

        // Only completed resolutions have a meaningful duration.
        if (addressResolutionStatistics.isInflightRequest() ||
            addressResolutionStatistics.getEndTimeUTC() == null) {
            continue;
        }

        Tags addressResolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                addressResolutionStatistics.getTargetEndpoint(),
                addressResolutionStatistics.isForceRefresh(),
                addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
            )
        );

        Duration latency = Duration.between(
            addressResolutionStatistics.getStartTimeUTC(),
            addressResolutionStatistics.getEndTimeUTC());

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(addressResolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(latency);
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(addressResolutionTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
    }
}
}
// Per-endpoint RNTBD metrics recorder. The constructor registers the long-lived
// gauge/counter meters (concurrency, queue depth, endpoint and channel counts) for the
// endpoint, and markComplete(...) records per-request latency and payload sizes when a
// request finishes. All meters honor the client's metric categories and meter options.
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    private final Tags tags;          // endpoint-identifying tag, fixed per recorder instance
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                // NOTE(review): these gauges hold a strong reference to 'endpoint' for the
                // lifetime of the registry - confirm gauge deregistration on endpoint eviction.
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                     .description("RNTBD concurrent requests (executing or queued request count)")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                     .description("RNTBD queued request count")
                     .tags(getEffectiveTags(tags, options))
                     .register(registry);
            }
        }

        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            // Endpoint-level meters are client-wide, hence no endpoint tags here.
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                     .description("RNTBD endpoint count")
                     .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }

        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            // Channel counters read from durableEndpointMetrics(), which survives
            // endpoint re-creation (monotonic totals).
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
    }

    // Called when an RNTBD request record completes; stops the record's timing sample
    // against the overall latency timer plus the success- or failure-specific timer,
    // then records request/response payload sizes. Falls back to a plain stop() when
    // the DirectRequests category is disabled.
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;

            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }

            // Timers may be null when their meters are disabled; requestRecord.stop
            // receives the overall timer plus the outcome-specific one.
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                requestSize.record(requestRecord.requestLength());
            }

            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            // Metrics disabled - still complete the record's timing sample.
            requestRecord.stop();
        }
    }
}
/**
 * Immutable cache entry pairing the outcome of a meter-registry descendant scan with
 * the instant after which the cached outcome expires and must be recomputed.
 */
static class DescendantValidationResult {
    // Point in time after which this cached outcome is stale.
    private final Instant validUntil;
    // Cached outcome of the descendant-registry scan.
    private final boolean outcome;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.validUntil = expiration;
        this.outcome = result;
    }

    public Instant getExpiration() {
        return validUntil;
    }

    public boolean getResult() {
        return outcome;
    }
}
} |
// NOTE(review): this region contains two identical copies of hasAnyActualMeterRegistry()
// and several "... | ..." pipe-separated fragments (e.g. "Depth restriction is in line 186")
// that look like dataset/extraction artifacts rather than compilable Java - confirm against
// the original source file before building.
//
// Semantics of the method (both copies): double-checked, time-bounded cache around the
// recursive descendant scan. The cached result is reused until its expiration instant;
// once stale, the scan is re-run under lockObject and cached for another 10 seconds.
Depth restriction is in line 186 | private static boolean hasAnyActualMeterRegistry() {
    Instant nowSnapshot = Instant.now();
    // Fast path: unsynchronized read of the volatile cache entry.
    DescendantValidationResult snapshot = lastDescendantValidation;
    if (nowSnapshot.isBefore(snapshot.getExpiration())) {
        return snapshot.getResult();
    }
    synchronized (lockObject) {
        // Re-check under the lock - another thread may have refreshed the cache.
        snapshot = lastDescendantValidation;
        if (nowSnapshot.isBefore(snapshot.getExpiration())) {
            return snapshot.getResult();
        }
        DescendantValidationResult newResult = new DescendantValidationResult(
            nowSnapshot.plus(10, ChronoUnit.SECONDS),
            hasAnyActualMeterRegistryCore(compositeRegistry, 1)
        );
        lastDescendantValidation = newResult;
        return newResult.getResult();
    }
// NOTE(review): duplicate copy of the same method starts on the artifact line below.
} | hasAnyActualMeterRegistryCore(compositeRegistry, 1) | private static boolean hasAnyActualMeterRegistry() {
    Instant nowSnapshot = Instant.now();
    DescendantValidationResult snapshot = lastDescendantValidation;
    if (nowSnapshot.isBefore(snapshot.getExpiration())) {
        return snapshot.getResult();
    }
    synchronized (lockObject) {
        snapshot = lastDescendantValidation;
        if (nowSnapshot.isBefore(snapshot.getExpiration())) {
            return snapshot.getResult();
        }
        DescendantValidationResult newResult = new DescendantValidationResult(
            nowSnapshot.plus(10, ChronoUnit.SECONDS),
            hasAnyActualMeterRegistryCore(compositeRegistry, 1)
        );
        lastDescendantValidation = newResult;
        return newResult.getResult();
    }
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
// Bridge accessors into internal state of CosmosAsyncClient / CosmosDiagnostics.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
    ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Escaper used for meter names / tag values; keeps '_', '-', '/', '.' unescaped.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
// Composite fan-out registry all telemetry meters are registered against; replaced with a
// fresh instance when the last concrete registry is removed (see remove()).
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
// Ref-count per concrete registry so repeated add()/remove() pairs only attach/detach once.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
// Captured on the first add(); consumed by recordSystemUsage().
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
// Time-bounded cache for the "is any concrete registry attached?" scan; the initial
// Instant.MIN expiration forces the first caller to perform a real scan.
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
// Guards refreshes of lastDescendantValidation.
private static final Object lockObject = new Object();
// Pre-built tag identifying query-plan (gateway) requests.
private static final Tag QUERYPLAN_TAG = Tag.of(
    TagName.RequestOperationType.toString(),
    ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/** Renders a throwable's full stack trace into a single String (used for TRACE logging). */
private static String convertStackTraceToString(Throwable throwable) {
    try (StringWriter buffer = new StringWriter();
         PrintWriter printer = new PrintWriter(buffer)) {
        throwable.printStackTrace(printer);
        return buffer.toString();
    } catch (IOException e) {
        // StringWriter.close() is a no-op, so this path is effectively unreachable.
        throw new IllegalStateException(e);
    }
}
/**
 * Creates a brand-new composite registry. When TRACE logging is active, a meter-added
 * hook is wired that logs each added meter's name together with the current call stack
 * (captured via a throwaway exception) to help diagnose unexpected meter creation.
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();

    if (!logger.isTraceEnabled()) {
        return freshRegistry;
    }

    freshRegistry.config().onMeterAdded(meter -> logger.trace(
        "Meter '{}' added. Callstack: {}",
        meter.getId().getName(),
        convertStackTraceToString(new IllegalStateException("Dummy"))));
    return freshRegistry;
}
// Publishes system-wide CPU load and free-memory distribution summaries. No-op until at
// least one registry has been add()-ed (which also captures cpuOptions/memoryOptions).
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry);
        averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        // Free memory publishes no percentiles/histogram - only the raw distribution.
        DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry);
        freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
    }
}
/**
 * Records operation-level metrics for a completed operation by expanding the
 * diagnostics context into the individual dimensions consumed by the private overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    CosmosDiagnosticsContext ctx = diagnosticsContext;
    recordOperation(
        client,
        ctx,
        ctx.getStatusCode(),
        ctx.getSubStatusCode(),
        ctx.getMaxItemCount(),
        ctx.getActualItemCount(),
        ctx.getContainerName(),
        ctx.getDatabaseName(),
        ctx.getOperationType(),
        ctx.isPointOperation(),
        ctx.getResourceType(),
        ctx.getEffectiveConsistencyLevel(),
        ctx.getOperationId(),
        ctx.getTotalRequestCharge(),
        ctx.getDuration());
}
/**
 * Depth-first scan for at least one concrete (non-composite) registry nested under the
 * given composite. Beyond a nesting depth of 100 the scan conservatively reports true
 * to guard against pathological or cyclic registry graphs.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        return true;
    }

    return compositeMeterRegistry
        .getRegistries()
        .stream()
        .anyMatch(child -> !(child instanceof CompositeMeterRegistry)
            || hasAnyActualMeterRegistryCore((CompositeMeterRegistry) child, depth + 1));
}
// Core operation-metric entry point: builds the operation-level tag set and delegates
// per-category recording to a short-lived OperationMetricProducer. Bails out early when
// no concrete registry is attached or client metrics are disabled.
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // NOTE(review): the enablement gate reuses shouldEnableEmptyPageDiagnostics - confirm
    // this accessor really reflects client-metrics enablement and not only empty-page
    // diagnostics.
    boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
        return;
    }

    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);

    // Contacted regions are only materialized when the OperationDetails category is active.
    Set<String> contactedRegions = Collections.emptySet();
    if (metricCategories.contains(MetricCategory.OperationDetails)) {
        contactedRegions = diagnosticsContext.getContactedRegionNames();
    }

    Tags operationTags = createOperationTags(
        metricTagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );

    OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
    // -1 encodes "not available" for item counts.
    metricProducer.recordOperation(
        client,
        requestCharge,
        latency,
        maxItemCount == null ? -1 : maxItemCount,
        actualItemCount == null ? -1: actualItemCount,
        diagnosticsContext,
        contactedRegions
    );
}
/**
 * Creates the per-endpoint RNTBD completion recorder; its meters register against the
 * shared composite registry current at creation time.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {
    return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
/**
 * Registers a meter registry (ref-counted). Only the first add() of a given registry
 * attaches it to the composite, captures the CPU/memory meter options and invalidates
 * the cached descendant-scan result; subsequent adds merely bump the reference count.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {

    AtomicLong refCount = registryRefCount.computeIfAbsent(registry, ignored -> new AtomicLong(0));
    boolean isFirstReference = refCount.incrementAndGet() == 1L;
    if (!isFirstReference) {
        return;
    }

    ClientTelemetryMetrics.compositeRegistry.add(registry);
    ClientTelemetryMetrics.cpuOptions = cpuOptions;
    ClientTelemetryMetrics.memoryOptions = memoryOptions;
    // Force hasAnyActualMeterRegistry() to re-scan on its next call.
    lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
/**
 * Unregisters a ref-counted meter registry. When the last reference is released the
 * registry is cleared, closed and detached from the composite; if no registries remain,
 * the composite is replaced with a fresh instance so stale meters cannot resurface.
 *
 * Fixes two defects in the previous version:
 * - NullPointerException when remove() was called for a registry that was never add()-ed
 *   (registryRefCount.get(registry) was dereferenced unchecked);
 * - the exhausted zero-count AtomicLong entry was never evicted from registryRefCount,
 *   leaking one map entry (and a reference to the closed registry) per retired registry.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // Unbalanced remove() without a matching add(); nothing to release.
        return;
    }

    if (refCount.decrementAndGet() == 0L) {
        // Drop the exhausted ref-count entry so the map does not grow unboundedly.
        registryRefCount.remove(registry);

        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }
        // Force hasAnyActualMeterRegistry() to re-scan on its next call.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes {@code value} so it is safe to embed in meter names and tag values;
 * per {@code PERCENT_ESCAPER}'s construction only '_', '-', '/' and '.' stay unescaped.
 */
public static String escape(String value) {
    return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set for one operation's meters, emitting only the tag
 * dimensions the client opted into via {@code metricTagNames}.
 *
 * Fix: the Container tag value was previously computed with un-parenthesized ternaries.
 * Since '+' binds tighter than '!=' and '?:', the whole expression collapsed to
 * {@code ((escape(acct) + "/" + databaseId) != null) ? escape(databaseId) : ...}, whose
 * condition is always true - so the tag value was just {@code escape(databaseId)}
 * (dropping the account and container parts) and threw NPE for a null databaseId.
 * Parentheses restore the intended "account/database/container" format with "NONE"
 * substituted for missing parts.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {

    List<Tag> effectiveTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }

    if (metricTagNames.contains(TagName.Container)) {
        // "<account>/<database|NONE>/<container|NONE>"
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }

    if (metricTagNames.contains(TagName.Operation)) {
        // Non-point operations (queries) with an operationId get it appended for disambiguation.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }

    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }

    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }

    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }

    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {

        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }

    return Tags.of(effectiveTags);
}
/**
 * Applies the meter's suppressed-tag configuration: returns {@code tags} unchanged when
 * nothing is suppressed, otherwise a copy with every suppressed tag key filtered out.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    // Suppression is configured via TagName enum constants but Tag keys are strings,
    // so compare on the enum constant names.
    HashSet<String> suppressedKeys = new HashSet<>();
    suppressedTags.forEach(tagName -> suppressedKeys.add(tagName.name()));

    List<Tag> retained = new ArrayList<>();
    for (Tag candidate : tags) {
        if (suppressedKeys.contains(candidate.getKey())) {
            continue;
        }
        retained.add(candidate);
    }
    return Tags.of(retained);
}
// Produces all operation-scoped meters for one completed Cosmos operation; a short-lived
// instance is created per recorded operation (see the private recordOperation overload).
private static class OperationMetricProducer {
    // Tag dimensions the client opted into.
    private final EnumSet<TagName> metricTagNames;
    // Metric categories the client opted into.
    private final EnumSet<MetricCategory> metricCategories;
    // Operation-level tags shared by every meter emitted by this producer.
    private final Tags operationTags;

    public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
        this.metricCategories = metricCategories;
        this.metricTagNames = metricTagNames;
        this.operationTags = operationTags;
    }
// Records the operation-summary meters (call count, RU charge, latency), the optional
// operation-detail meters (regions contacted, item counts), and then walks every
// CosmosDiagnostics of the operation to emit request-level direct/gateway/address-
// resolution and query-plan metrics.
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {

    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }

    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        // Clamped to the summary's maximum expected value.
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }

    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }

        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }

    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }

    // Drill into the per-request diagnostics collected for this operation.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);

        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {

                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }

        // Query/feed operations additionally carry query-plan diagnostics.
        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);

        if (feedDiagnostics == null) {
            continue;
        }

        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();
        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Publishes meters for a query-plan gateway call: a request counter, a request
 * latency timer and (via recordRequestTimeline) the per-event timeline.
 * No-op when there are no query-plan diagnostics or the RequestSummary
 * metric category is disabled.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Query-plan calls carry a fixed operation-type tag on top of the operation tags.
    Tags queryPlanTags = operationTags.and(createQueryPlanTags(metricTagNames));

    CosmosMeterOptions countOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    boolean countApplies = countOptions.isEnabled()
        && (!countOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
    if (countApplies) {
        Counter
            .builder(countOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(queryPlanTags, countOptions))
            .register(compositeRegistry)
            .increment();
    }

    Duration queryPlanLatency = queryPlanDiagnostics.getDuration();
    if (queryPlanLatency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        boolean latencyApplies = latencyOptions.isEnabled()
            && (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
        if (latencyApplies) {
            Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(queryPlanTags, latencyOptions))
                .register(compositeRegistry)
                .record(queryPlanLatency);
        }
    }

    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(),
        queryPlanTags);
}
/**
 * Records the request and response payload sizes (bytes) as distribution
 * summaries, honoring each meter's enablement flag and, when configured,
 * the diagnostic-threshold filter on the context.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    CosmosMeterOptions requestSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
    boolean recordRequestSize = requestSizeOptions.isEnabled()
        && (!requestSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
    if (recordRequestSize) {
        DistributionSummary
            .builder(requestSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Request payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, requestSizeOptions))
            .register(compositeRegistry)
            .record(requestPayloadSizeInBytes);
    }

    CosmosMeterOptions responseSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
    boolean recordResponseSize = responseSizeOptions.isEnabled()
        && (!responseSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated());
    if (recordResponseSize) {
        DistributionSummary
            .builder(responseSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Response payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, responseSizeOptions))
            .register(compositeRegistry)
            .record(responsePayloadSizeInBytes);
    }
}
/**
 * Records the requested max. item count and the actual returned item count as
 * distribution summaries. Only applies to feed/query operations (maxItemCount
 * &gt; 0) when the OperationDetails metric category is enabled; values are
 * clamped to [0, 100000].
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    // Guard clause instead of wrapping the whole body in an if-block.
    if (maxItemCount <= 0 || !this.metricCategories.contains(MetricCategory.OperationDetails)) {
        return;
    }

    CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
    if (maxItemCountOptions.isEnabled()) {
        DistributionSummary
            .builder(maxItemCountOptions.getMeterName().toString())
            .baseUnit("item count")
            .description("Request max. item count")
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, maxItemCountOptions))
            .register(compositeRegistry)
            .record(Math.max(0, Math.min(maxItemCount, 100_000d)));
    }

    CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
    if (actualItemCountOptions.isEnabled()) {
        DistributionSummary
            .builder(actualItemCountOptions.getMeterName().toString())
            .baseUnit("item count")
            .description("Response actual item count")
            .maximumExpectedValue(100_000d)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, actualItemCountOptions))
            .register(compositeRegistry)
            .record(Math.max(0, Math.min(actualItemCount, 100_000d)));
    }
}
// Builds the per-request tag set (appended to the operation-level tags) based on
// which TagNames the client opted into. Missing/blank values are normalized to
// "NONE"; free-form values are percent-escaped so they are meter-safe.
private Tags createRequestTags(
EnumSet<TagName> metricTagNames,
String pkRangeId,
int statusCode,
int subStatusCode,
String resourceType,
String operationType,
String regionName,
String serviceEndpoint,
String serviceAddress
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
effectiveTags.add(Tag.of(
TagName.PartitionKeyRangeId.toString(),
Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
}
// Status is reported as combined "statusCode/subStatusCode" (e.g. "429/3200").
if (metricTagNames.contains(TagName.RequestStatusCode)) {
effectiveTags.add(Tag.of(
TagName.RequestStatusCode.toString(),
statusCode + "/" + subStatusCode));
}
// Operation is reported as combined "resourceType/operationType".
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(Tag.of(
TagName.RequestOperationType.toString(),
resourceType + "/" + operationType));
}
if (metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
regionName != null ? regionName : "NONE"));
}
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
// The escaped address is computed outside the ServiceAddress check because the
// partition/replica-id parsing below also needs it.
String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
if (metricTagNames.contains(TagName.ServiceAddress)) {
effectiveTags.add(Tag.of(
TagName.ServiceAddress.toString(),
effectiveServiceAddress));
}
boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
if (containsPartitionId || containsReplicaId) {
// Partition and replica ids are parsed out of the service address; both fall
// back to "NONE" when the address cannot be split into exactly two parts.
String partitionId = "NONE";
String replicaId = "NONE";
String[] partitionAndReplicaId =
StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
if (partitionAndReplicaId.length == 2) {
partitionId = partitionAndReplicaId[0];
replicaId = partitionAndReplicaId[1];
}
if (containsPartitionId) {
effectiveTags.add(Tag.of(
TagName.PartitionId.toString(),
partitionId));
}
if (containsReplicaId) {
effectiveTags.add(Tag.of(
TagName.ReplicaId.toString(),
replicaId));
}
}
return Tags.of(effectiveTags);
}
/**
 * Builds the tag set for query-plan requests. Only the operation-type tag (a
 * fixed DocumentCollection/QueryPlan marker) is applicable here, and only when
 * the client opted into that tag name.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    return metricTagNames.contains(TagName.RequestOperationType)
        ? Tags.of(QUERYPLAN_TAG)
        : Tags.empty();
}
/**
 * Builds the tag set for address-resolution meters: target endpoint plus the
 * two force-refresh flags, each emitted only when opted into via TagNames.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> tags = new ArrayList<>(3);

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        tags.add(Tag.of(
            TagName.IsForceRefresh.toString(),
            isForceRefresh ? "True" : "False"));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        tags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            isForceCollectionRoutingMapRefresh ? "True" : "False"));
    }

    return Tags.of(tags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Records one timer per non-empty event in the request timeline (meter name is
 * suffixed with the escaped event name). No-op when the timeline is absent, the
 * RequestDetails category is off, the meter is disabled, or threshold filtering
 * is on and the context did not violate its thresholds.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {

    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }
    CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
        client,
        name);
    if (!timelineOptions.isEnabled() ||
        (timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
        return;
    }
    for (RequestTimeline.Event event : requestTimeline) {
        Duration duration = event.getDuration();
        // Fix: use value comparison - 'duration == Duration.ZERO' is reference
        // equality and only matches the shared ZERO constant, silently recording
        // zero-length Duration instances created through other code paths.
        if (duration == null || duration.isZero()) {
            continue;
        }
        Timer eventMeter = Timer
            .builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
            .description("Request timeline (" + event.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(timelineOptions.getPercentiles())
            .publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, timelineOptions))
            .register(compositeRegistry);
        eventMeter.record(duration);
    }
}
/**
 * Publishes per-store-response meters for direct (RNTBD) requests: backend
 * latency, RU charge, request latency, request count, request timeline, payload
 * sizes and endpoint statistics. No-op unless the RequestSummary category is on.
 */
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {

    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();

        // Request-level tags extend the operation tags with pkRange/status/
        // operation/region/replica dimensions (subject to opted-in tag names).
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );

        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                // Fix: reuse the already-fetched (and null-checked) value instead
                // of invoking getBackendLatencyInMs() a second time.
                backendRequestLatencyMeter.record(backendLatency);
            }
        }

        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Recorded value is clamped to the 100k RU expected maximum.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        // recordRequestTimeline re-checks the category itself; this outer check
        // just avoids the call entirely when RequestDetails is off.
        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }

        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );

        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
// Publishes gateway (HTTP) request meters - request count, RU charge and latency -
// plus the gateway request timeline, for every gateway call captured in the
// client-side request statistics. No-op unless the RequestSummary category is on.
// NOTE(review): the single 'latency' argument (the overall request duration) is
// recorded once per gatewayStats entry - confirm that is intended for multi-entry lists.
private void recordGatewayStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Duration latency,
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
if (gatewayStatisticsList == null
|| gatewayStatisticsList.size() == 0
|| !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
// Direct-mode-only tag dimensions (region, service endpoint/address, partition
// and replica ids) are stripped - they are not applicable to gateway calls.
EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
metricTagNamesForGateway.remove(TagName.RegionName);
metricTagNamesForGateway.remove(TagName.ServiceAddress);
metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
metricTagNamesForGateway.remove(TagName.PartitionId);
metricTagNamesForGateway.remove(TagName.ReplicaId);
for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
// The stripped dimensions are passed as null; createRequestTags skips them.
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNamesForGateway,
gatewayStats.getPartitionKeyRangeId(),
gatewayStats.getStatusCode(),
gatewayStats.getSubStatusCode(),
gatewayStats.getResourceType().toString(),
gatewayStats.getOperationType().toString(),
null,
null,
null)
);
// Gateway request counter (per-meter enablement + optional threshold filter).
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
// RU charge distribution, clamped to the 100k RU expected maximum.
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = gatewayStats.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Gateway Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
// Per-event gateway timeline (recordRequestTimeline checks its own category/filters).
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
gatewayStats.getRequestTimeline(), requestTags);
}
}
// Publishes address-resolution meters (latency timer and request counter) for every
// completed address resolution in the map. In-flight entries and entries without an
// end timestamp are skipped. No-op unless the AddressResolutions category is on.
private void recordAddressResolutionStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {
if (addressResolutionStatisticsMap == null
|| addressResolutionStatisticsMap.size() == 0
|| !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
return;
}
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
: addressResolutionStatisticsMap.values()) {
// Only completed resolutions have a meaningful duration.
if (addressResolutionStatistics.isInflightRequest() ||
addressResolutionStatistics.getEndTimeUTC() == null) {
continue;
}
// Tags: target endpoint plus the two force-refresh flags (when opted in).
Tags addressResolutionTags = operationTags.and(
createAddressResolutionTags(
metricTagNames,
addressResolutionStatistics.getTargetEndpoint(),
addressResolutionStatistics.isForceRefresh(),
addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
)
);
Duration latency = Duration.between(
addressResolutionStatistics.getStartTimeUTC(),
addressResolutionStatistics.getEndTimeUTC());
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer addressResolutionLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Address resolution latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(addressResolutionTags, latencyOptions))
.register(compositeRegistry);
addressResolutionLatencyMeter.record(latency);
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Address resolution requests")
.tags(getEffectiveTags(addressResolutionTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
}
}
}
/**
 * Micrometer-backed recorder for RNTBD (direct-mode TCP) transport metrics.
 * Endpoint/channel gauges and counters are registered eagerly in the
 * constructor; per-request timers and payload-size summaries are emitted from
 * {@link #markComplete(RntbdRequestRecord)} when a request finishes.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    private final Tags tags;            // endpoint-scoped tag (endpoint.clientMetricTag())
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        // Per-endpoint request gauges (concurrent + queued counts).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                    .description("RNTBD concurrent requests (executing or queued request count)")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                    .description("RNTBD queued request count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }

        // Client-wide endpoint gauges/counters (intentionally untagged per endpoint).
        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                    .description("RNTBD endpoint count")
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(registry);
            }
        }

        // Per-endpoint channel counters/gauges backed by the durable endpoint metrics.
        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            CosmosMeterOptions options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
        }
    }

    /**
     * Stops the request record's timing against the overall latency timer plus
     * the success- or failure-specific timer, and records request/response
     * payload sizes. When DirectRequests metrics are off, only stops the record.
     */
    @Override // fix: implements RntbdMetricsCompletionRecorder.markComplete - annotate for compiler checking
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            // Timers stay null when the corresponding meter is disabled;
            // requestRecord.stop(...) must tolerate null timers.
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(tags, options))
                    .register(registry);
            }
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                requestSize.record(requestRecord.requestLength());
            }
            options = client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            requestRecord.stop();
        }
    }
}
// Immutable cache entry pairing a validation outcome with the instant after
// which it must be recomputed (Instant.MIN forces immediate revalidation).
static class DescendantValidationResult {
    private final Instant expiration;
    private final boolean result;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.expiration = expiration;
        this.result = result;
    }

    // Point in time after which this cached result is stale.
    public Instant getExpiration() {
        return expiration;
    }

    // The cached validation outcome.
    public boolean getResult() {
        return result;
    }
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
// Bridge accessors exposing package-private state of CosmosAsyncClient/CosmosDiagnostics.
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Escaper for meter/tag values; "_-/." are passed as extra safe characters - see PercentEscaper.
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
// Composite registry all meters fan out to; downstream registries are attached via add().
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
// Reference count per attached MeterRegistry so add()/remove() calls can be paired.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
// System CPU/memory meter options; (re)assigned when the first ref of a registry is added.
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
// Cached descendant-registry validation; add() resets it with Instant.MIN to force revalidation.
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
// Fixed operation-type tag applied to query-plan gateway requests.
private static final Tag QUERYPLAN_TAG = Tag.of(
TagName.RequestOperationType.toString(),
ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders a throwable's full stack trace into a String (used by the
 * meter-added trace logging in createFreshRegistry).
 *
 * Simplification: StringWriter.close() is documented as a no-op, so the
 * previous try-with-resources and the impossible IOException ->
 * IllegalStateException plumbing were unnecessary.
 */
private static String convertStackTraceToString(Throwable throwable) {
    StringWriter stackTraceWriter = new StringWriter();
    // Closing the PrintWriter flushes any pending output into the StringWriter.
    try (PrintWriter printWriter = new PrintWriter(stackTraceWriter)) {
        throwable.printStackTrace(printWriter);
    }
    return stackTraceWriter.toString();
}
/**
 * Creates a new empty composite registry. When trace logging is enabled, every
 * meter registration is logged together with the registering call stack to help
 * track down unexpected meter creation.
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();
    if (!logger.isTraceEnabled()) {
        return freshRegistry;
    }
    freshRegistry.config().onMeterAdded(meter -> logger.trace(
        "Meter '{}' added. Callstack: {}",
        meter.getId().getName(),
        convertStackTraceToString(new IllegalStateException("Dummy"))));
    return freshRegistry;
}
/**
 * Records system-wide CPU load (%) and free memory (MB) distribution summaries.
 * No-op until at least one downstream registry and the CPU/memory meter options
 * have been configured via add().
 */
public static void recordSystemUsage(
    float averageSystemCpuUsage,
    float freeMemoryAvailableInMB
) {
    boolean notReady = compositeRegistry.getRegistries().isEmpty()
        || cpuOptions == null
        || memoryOptions == null;
    if (notReady) {
        return;
    }

    if (cpuOptions.isEnabled()) {
        DistributionSummary
            .builder(CosmosMetricName.SYSTEM_CPU.toString())
            .baseUnit("%")
            .description("Avg. System CPU load")
            .maximumExpectedValue(100d)
            .publishPercentiles(cpuOptions.getPercentiles())
            .publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
            .register(compositeRegistry)
            .record(averageSystemCpuUsage);
    }

    if (memoryOptions.isEnabled()) {
        DistributionSummary
            .builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
            .baseUnit("MB")
            .description("Free memory available")
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .register(compositeRegistry)
            .record(freeMemoryAvailableInMB);
    }
}
/**
 * Records operation-level client metrics for the given diagnostics context,
 * pulling every dimension (status codes, item counts, container/database ids,
 * operation/resource types, consistency level, RU charge and latency) from the
 * context itself and delegating to the full overload.
 */
public static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext
) {
    CosmosDiagnosticsContext ctx = diagnosticsContext;
    recordOperation(
        client,
        ctx,
        ctx.getStatusCode(),
        ctx.getSubStatusCode(),
        ctx.getMaxItemCount(),
        ctx.getActualItemCount(),
        ctx.getContainerName(),
        ctx.getDatabaseName(),
        ctx.getOperationType(),
        ctx.isPointOperation(),
        ctx.getResourceType(),
        ctx.getEffectiveConsistencyLevel(),
        ctx.getOperationId(),
        ctx.getTotalRequestCharge(),
        ctx.getDuration()
    );
}
/**
 * Depth-first search for any non-composite (i.e. actual) meter registry nested
 * beneath the given composite registry. Returns true conservatively once the
 * recursion depth exceeds 100 to guard against pathological/cyclic nesting.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        // Recursion guard - assume a registry exists rather than descend further.
        return true;
    }

    for (MeterRegistry candidate : compositeMeterRegistry.getRegistries()) {
        if (!(candidate instanceof CompositeMeterRegistry)) {
            // Found a concrete registry.
            return true;
        }
        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) candidate, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Core operation-metric entry point: builds the operation-level tag set from
 * the opted-in tag names and hands off to an OperationMetricProducer. Bails out
 * early when client telemetry metrics are disabled on the client or no actual
 * downstream meter registry is attached.
 */
private static void recordOperation(
    CosmosAsyncClient client,
    CosmosDiagnosticsContext diagnosticsContext,
    int statusCode,
    int subStatusCode,
    Integer maxItemCount,
    Integer actualItemCount,
    String containerId,
    String databaseId,
    String operationType,
    boolean isPointOperation,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    float requestCharge,
    Duration latency
) {
    // Same evaluation order as before: read the client flag first, then check
    // whether any registry can actually consume the meters.
    boolean telemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
    if (!hasAnyActualMeterRegistry() || !telemetryMetricsEnabled) {
        return;
    }

    Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
    String accountTagValue = clientAccessor.getAccountTagValue(client);
    EnumSet<TagName> tagNames = clientAccessor.getMetricTagNames(client);
    EnumSet<MetricCategory> categories = clientAccessor.getMetricCategories(client);

    // Contacted regions are only surfaced when OperationDetails is enabled.
    Set<String> contactedRegions = categories.contains(MetricCategory.OperationDetails)
        ? diagnosticsContext.getContactedRegionNames()
        : Collections.emptySet();

    Tags operationTags = createOperationTags(
        tagNames,
        statusCode,
        subStatusCode,
        containerId,
        databaseId,
        operationType,
        resourceType,
        consistencyLevel,
        operationId,
        isPointOperation,
        contactedRegions,
        clientCorrelationTag,
        accountTagValue
    );

    // null item counts (point operations) are normalized to -1.
    new OperationMetricProducer(categories, tagNames, operationTags)
        .recordOperation(
            client,
            requestCharge,
            latency,
            maxItemCount == null ? -1 : maxItemCount,
            actualItemCount == null ? -1 : actualItemCount,
            diagnosticsContext,
            contactedRegions
        );
}
/**
 * Creates a completion recorder that publishes RNTBD (direct transport)
 * request metrics for the given endpoint into the shared composite registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
    RntbdTransportClient client,
    RntbdEndpoint endpoint) {
    final RntbdMetricsCompletionRecorder recorder =
        new RntbdMetricsV2(compositeRegistry, client, endpoint);
    return recorder;
}
/**
 * Registers a meter registry (reference-counted). On the first reference the
 * registry is attached to the shared composite registry, the CPU/memory meter
 * options are adopted, and the cached descendant-validation result is reset.
 */
public static synchronized void add(
    MeterRegistry registry,
    CosmosMeterOptions cpuOptions,
    CosmosMeterOptions memoryOptions) {
    AtomicLong refCount = registryRefCount.computeIfAbsent(registry, key -> new AtomicLong(0));
    boolean isFirstReference = refCount.incrementAndGet() == 1L;
    if (isFirstReference) {
        ClientTelemetryMetrics.compositeRegistry.add(registry);
        ClientTelemetryMetrics.cpuOptions = cpuOptions;
        ClientTelemetryMetrics.memoryOptions = memoryOptions;
        // Invalidate the cached "has a real registry" answer.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Releases one reference to a meter registry. When the count reaches zero the
 * registry is cleared, closed and detached from the composite registry; the
 * composite registry itself is recreated once empty.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // BUGFIX: remove() without a matching add() used to NPE here; treat it
        // as a no-op instead.
        return;
    }
    if (refCount.decrementAndGet() == 0L) {
        // Drop the zeroed entry so the map does not leak closed registries.
        registryRefCount.remove(registry);
        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }
        // Invalidate the cached "has a real registry" answer.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes reserved characters in a tag value so it is safe to use as
 * a metric dimension.
 */
public static String escape(String value) {
    String escaped = PERCENT_ESCAPER.escape(value);
    return escaped;
}
/**
 * Builds the operation-level tag set, honoring the configured {@link TagName}
 * filter. Values that may contain reserved characters are percent-escaped.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }
    if (metricTagNames.contains(TagName.Container)) {
        // BUGFIX: the ternaries must be parenthesized. '+' binds tighter than
        // '?:', so the previous expression evaluated
        // (escape(accountTagValue) + "/" + databaseId) != null - which is
        // always true for a String concatenation - collapsing the whole value
        // to escape(databaseId): the account prefix and container suffix were
        // dropped and a null databaseId caused an NPE inside escape().
        String containerTagValue =
            escape(accountTagValue)
                + "/"
                + (databaseId != null ? escape(databaseId) : "NONE")
                + "/"
                + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }
    if (metricTagNames.contains(TagName.Operation)) {
        // Non-point operations with an operation id (e.g. queries) get the id
        // appended as a third path segment.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }
    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }
    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }
    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }
    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }
    return Tags.of(effectiveTags);
}
/**
 * Filters a tag set against the meter's suppressed tag names. Returns the
 * input unchanged when nothing is suppressed.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressed = meterOptions.getSuppressedTagNames();
    if (suppressed == null || suppressed.isEmpty()) {
        // Fast path - no filtering required.
        return tags;
    }
    Set<String> suppressedKeys = new HashSet<>(suppressed.size());
    for (TagName tagName : suppressed) {
        suppressedKeys.add(tagName.name());
    }
    List<Tag> kept = new ArrayList<>();
    for (Tag candidate : tags) {
        boolean isSuppressed = suppressedKeys.contains(candidate.getKey());
        if (!isSuppressed) {
            kept.add(candidate);
        }
    }
    return Tags.of(kept);
}
// Helper that publishes all operation- and request-level meters for a single
// completed operation, using a fixed set of metric categories, tag names and
// pre-computed operation tags. Instances are cheap and created per operation.
private static class OperationMetricProducer {
    // Which tag dimensions to emit on each meter.
    private final EnumSet<TagName> metricTagNames;
    // Which meter groups (summary, details, legacy, ...) are enabled.
    private final EnumSet<MetricCategory> metricCategories;
    // Operation-level tag set shared by all meters this producer emits.
    private final Tags operationTags;
    public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
        this.metricCategories = metricCategories;
        this.metricTagNames = metricTagNames;
        this.operationTags = operationTags;
    }
/**
 * Publishes all meters for one completed operation: call count, RU charge,
 * latency, optional operation details (regions contacted, item counts) and -
 * per captured diagnostics instance - request-level store/gateway/
 * address-resolution/query-plan metrics.
 */
public void recordOperation(
    CosmosAsyncClient cosmosAsyncClient,
    float requestCharge,
    Duration latency,
    int maxItemCount,
    int actualItemCount,
    CosmosDiagnosticsContext diagnosticsContext,
    Set<String> contactedRegions) {
    CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_CALLS);
    if (callsOptions.isEnabled()) {
        Counter operationsCounter = Counter
            .builder(callsOptions.getMeterName().toString())
            .baseUnit("calls")
            .description("Operation calls")
            .tags(getEffectiveTags(operationTags, callsOptions))
            .register(compositeRegistry);
        operationsCounter.increment();
    }
    CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
    if (requestChargeOptions.isEnabled()) {
        DistributionSummary requestChargeMeter = DistributionSummary
            .builder(requestChargeOptions.getMeterName().toString())
            .baseUnit("RU (request unit)")
            .description("Operation RU charge")
            .maximumExpectedValue(100_000d)
            .publishPercentiles(requestChargeOptions.getPercentiles())
            .publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, requestChargeOptions))
            .register(compositeRegistry);
        // Clamp to the distribution's configured maximum expected value.
        requestChargeMeter.record(Math.min(requestCharge, 100_000d));
    }
    if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
        if (regionsOptions.isEnabled()) {
            DistributionSummary regionsContactedMeter = DistributionSummary
                .builder(regionsOptions.getMeterName().toString())
                .baseUnit("Regions contacted")
                .description("Operation - regions contacted")
                .maximumExpectedValue(100d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, regionsOptions))
                .register(compositeRegistry);
            if (contactedRegions != null && contactedRegions.size() > 0) {
                regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
            }
        }
        this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
    }
    CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.OPERATION_SUMMARY_LATENCY);
    if (latencyOptions.isEnabled()) {
        Timer latencyMeter = Timer
            .builder(latencyOptions.getMeterName().toString())
            .description("Operation latency")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(latencyOptions.getPercentiles())
            .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(operationTags, latencyOptions))
            .register(compositeRegistry);
        latencyMeter.record(latency);
    }
    // Request-level metrics: walk every diagnostics instance captured for the
    // operation and publish direct/gateway/address-resolution meters.
    for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
        Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
            diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);
        if (clientSideRequestStatistics != null) {
            for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getResponseStatisticsList());
                recordStoreResponseStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getSupplementalResponseStatisticsList());
                recordGatewayStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getDuration(),
                    requestStatistics.getGatewayStatisticsList());
                recordAddressResolutionStatistics(
                    diagnosticsContext,
                    cosmosAsyncClient,
                    requestStatistics.getAddressResolutionStatistics());
            }
        }
        FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
            .getFeedResponseDiagnostics(diagnostics);
        if (feedDiagnostics == null) {
            continue;
        }
        // Query plan metrics only exist for feed/query responses.
        QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
            feedDiagnostics.getQueryPlanDiagnosticsContext();
        recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
    }
}
/**
 * Publishes gateway request count/latency/timeline meters for the query-plan
 * retrieval call, tagged as a query-plan request. No-op when no query-plan
 * diagnostics exist or the RequestSummary category is disabled.
 */
private void recordQueryPlanDiagnostics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient cosmosAsyncClient,
    QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
    if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }
    Tags requestTags = operationTags.and(
        createQueryPlanTags(metricTagNames)
    );
    CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
    // Meters can optionally be restricted to operations that violated the
    // configured diagnostic thresholds.
    if (requestsOptions.isEnabled() &&
        (!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        Counter requestCounter = Counter
            .builder(requestsOptions.getMeterName().toString())
            .baseUnit("requests")
            .description("Gateway requests")
            .tags(getEffectiveTags(requestTags, requestsOptions))
            .register(compositeRegistry);
        requestCounter.increment();
    }
    Duration latency = queryPlanDiagnostics.getDuration();
    if (latency != null) {
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            cosmosAsyncClient,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer requestLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Gateway Request latency")
                .maximumExpectedValue(Duration.ofSeconds(300))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, latencyOptions))
                .register(compositeRegistry);
            requestLatencyMeter.record(latency);
        }
    }
    recordRequestTimeline(
        ctx,
        cosmosAsyncClient,
        CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
        queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Publishes request and response payload-size distributions (bytes) for a
 * single request, each gated on its own meter options and the optional
 * diagnostic-threshold filter.
 */
private void recordRequestPayloadSizes(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    int requestPayloadSizeInBytes,
    int responsePayloadSizeInBytes
) {
    CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
    if (reqSizeOptions.isEnabled() &&
        (!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        DistributionSummary requestPayloadSizeMeter = DistributionSummary
            .builder(reqSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Request payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, reqSizeOptions))
            .register(compositeRegistry);
        requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
    }
    CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
        client,
        CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
    if (rspSizeOptions.isEnabled() &&
        (!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
        DistributionSummary responsePayloadSizeMeter = DistributionSummary
            .builder(rspSizeOptions.getMeterName().toString())
            .baseUnit("bytes")
            .description("Response payload size in bytes")
            .maximumExpectedValue(16d * 1024)
            .publishPercentiles()
            .publishPercentileHistogram(false)
            .tags(getEffectiveTags(operationTags, rspSizeOptions))
            .register(compositeRegistry);
        responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
    }
}
/**
 * Publishes max-item-count and actual-item-count distributions for feed
 * operations. Skipped for point operations (maxItemCount <= 0, the -1
 * sentinel) or when the OperationDetails category is disabled.
 */
private void recordItemCounts(
    CosmosAsyncClient client,
    int maxItemCount,
    int actualItemCount
) {
    if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
        CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
        if (maxItemCountOptions.isEnabled()) {
            DistributionSummary maxItemCountMeter = DistributionSummary
                .builder(maxItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Request max. item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, maxItemCountOptions))
                .register(compositeRegistry);
            // Clamp into [0, 100000] before recording.
            maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
        }
        CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
        if (actualItemCountOptions.isEnabled()) {
            DistributionSummary actualItemCountMeter = DistributionSummary
                .builder(actualItemCountOptions.getMeterName().toString())
                .baseUnit("item count")
                .description("Response actual item count")
                .maximumExpectedValue(100_000d)
                .publishPercentiles()
                .publishPercentileHistogram(false)
                .tags(getEffectiveTags(operationTags, actualItemCountOptions))
                .register(compositeRegistry);
            // Clamp into [0, 100000] before recording.
            actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
        }
    }
}
/**
 * Builds the request-level tag set (partition key range, status codes,
 * operation/resource type, region, service endpoint/address and the
 * partition/replica ids parsed from the address), honoring the configured
 * tag-name filter.
 */
private Tags createRequestTags(
    EnumSet<TagName> metricTagNames,
    String pkRangeId,
    int statusCode,
    int subStatusCode,
    String resourceType,
    String operationType,
    String regionName,
    String serviceEndpoint,
    String serviceAddress
) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
        effectiveTags.add(Tag.of(
            TagName.PartitionKeyRangeId.toString(),
            Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
    }
    if (metricTagNames.contains(TagName.RequestStatusCode)) {
        effectiveTags.add(Tag.of(
            TagName.RequestStatusCode.toString(),
            statusCode + "/" + subStatusCode));
    }
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        effectiveTags.add(Tag.of(
            TagName.RequestOperationType.toString(),
            resourceType + "/" + operationType));
    }
    if (metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            regionName != null ? regionName : "NONE"));
    }
    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceEndpoint.toString(),
            serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
    }
    String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
    if (metricTagNames.contains(TagName.ServiceAddress)) {
        effectiveTags.add(Tag.of(
            TagName.ServiceAddress.toString(),
            effectiveServiceAddress));
    }
    boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
    boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
    if (containsPartitionId || containsReplicaId) {
        // Partition/replica ids are embedded in the service address path;
        // fall back to "NONE" when the address cannot be parsed.
        String partitionId = "NONE";
        String replicaId = "NONE";
        String[] partitionAndReplicaId =
            StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
        if (partitionAndReplicaId.length == 2) {
            partitionId = partitionAndReplicaId[0];
            replicaId = partitionAndReplicaId[1];
        }
        if (containsPartitionId) {
            effectiveTags.add(Tag.of(
                TagName.PartitionId.toString(),
                partitionId));
        }
        if (containsReplicaId) {
            effectiveTags.add(Tag.of(
                TagName.ReplicaId.toString(),
                replicaId));
        }
    }
    return Tags.of(effectiveTags);
}
/**
 * Returns the query-plan marker tag when the RequestOperationType dimension
 * is enabled; an empty tag set otherwise.
 */
private Tags createQueryPlanTags(
    EnumSet<TagName> metricTagNames
) {
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        return Tags.of(Collections.singletonList(QUERYPLAN_TAG));
    }
    return Tags.of(Collections.emptyList());
}
/**
 * Builds the tag set for address-resolution meters: target endpoint plus the
 * two force-refresh flags, honoring the configured tag-name filter.
 */
private Tags createAddressResolutionTags(
    EnumSet<TagName> metricTagNames,
    String serviceEndpoint,
    boolean isForceRefresh,
    boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> tags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = serviceEndpoint != null ? escape(serviceEndpoint) : "NONE";
        tags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }
    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        String forceRefreshValue = isForceRefresh ? "True" : "False";
        tags.add(Tag.of(TagName.IsForceRefresh.toString(), forceRefreshValue));
    }
    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        String forceMapRefreshValue = isForceCollectionRoutingMapRefresh ? "True" : "False";
        tags.add(Tag.of(TagName.IsForceCollectionRoutingMapRefresh.toString(), forceMapRefreshValue));
    }
    return Tags.of(tags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Publishes one timer per non-trivial event in a request timeline, using the
 * given meter name with the escaped event name appended. No-op when the
 * RequestDetails category is disabled, the meter is disabled, or threshold
 * filtering is on and the operation stayed within thresholds.
 */
private void recordRequestTimeline(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    CosmosMetricName name,
    RequestTimeline requestTimeline,
    Tags requestTags) {
    if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
        return;
    }
    CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
        client,
        name);
    if (!timelineOptions.isEnabled() ||
        (timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
        return;
    }
    for (RequestTimeline.Event event : requestTimeline) {
        Duration duration = event.getDuration();
        // BUGFIX: compare by value, not identity. 'duration == Duration.ZERO'
        // only skips durations that happen to be the shared ZERO constant;
        // any other zero-length Duration instance slipped through and was
        // recorded as a meaningless zero sample.
        if (duration == null || duration.isZero()) {
            continue;
        }
        Timer eventMeter = Timer
            .builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
            .description("Request timeline (" + event.getName() + ")")
            .maximumExpectedValue(Duration.ofSeconds(300))
            .publishPercentiles(timelineOptions.getPercentiles())
            .publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
            .tags(getEffectiveTags(requestTags, timelineOptions))
            .register(compositeRegistry);
        eventMeter.record(duration);
    }
}
/**
 * Publishes direct-mode (RNTBD) request meters - backend latency, RU charge,
 * end-to-end latency, request count, timeline and endpoint statistics - for
 * every store response captured for the operation. No-op when the
 * RequestSummary category is disabled.
 */
private void recordStoreResponseStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
    if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }
    for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
        StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
        StoreResponseDiagnostics storeResponseDiagnostics =
            storeResultDiagnostics.getStoreResponseDiagnostics();
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNames,
                storeResponseDiagnostics.getPartitionKeyRangeId(),
                storeResponseDiagnostics.getStatusCode(),
                storeResponseDiagnostics.getSubStatusCode(),
                responseStatistics.getRequestResourceType().toString(),
                responseStatistics.getRequestOperationType().toString(),
                responseStatistics.getRegionName(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
                storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
        );
        Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
        if (backendLatency != null) {
            CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
            if (beLatencyOptions.isEnabled() &&
                (!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                DistributionSummary backendRequestLatencyMeter = DistributionSummary
                    .builder(beLatencyOptions.getMeterName().toString())
                    .baseUnit("ms")
                    .description("Backend service latency")
                    .maximumExpectedValue(6_000d)
                    .publishPercentiles(beLatencyOptions.getPercentiles())
                    .publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, beLatencyOptions))
                    .register(compositeRegistry);
                // NOTE(review): re-reads the accessor instead of reusing the
                // already-unboxed 'backendLatency' local - harmless but redundant.
                backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
            }
        }
        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = storeResponseDiagnostics.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("RNTBD Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamp to the configured maximum before recording.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Duration latency = responseStatistics.getDuration();
            if (latency != null) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("RNTBD Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(6))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("RNTBD requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
        if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
            recordRequestTimeline(
                ctx,
                client,
                CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
                storeResponseDiagnostics.getRequestTimeline(), requestTags);
        }
        recordRequestPayloadSizes(
            ctx,
            client,
            storeResponseDiagnostics.getRequestPayloadLength(),
            storeResponseDiagnostics.getResponsePayloadLength()
        );
        recordRntbdEndpointStatistics(
            client,
            storeResponseDiagnostics.getRntbdEndpointStatistics(),
            requestTags);
    }
}
/**
 * Publishes gateway-mode request meters (request count, RU charge, latency,
 * timeline) for each gateway statistics entry. Region/endpoint/address and
 * partition/replica tags are stripped because gateway requests are not bound
 * to a specific replica. No-op when the list is empty or the RequestSummary
 * category is disabled.
 */
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
    if (gatewayStatisticsList == null
        || gatewayStatisticsList.size() == 0
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }
    // Work on a clone - the shared tag-name set must not be mutated.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);
    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,
                null,
                null)
        );
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            // Clamp to the configured maximum before recording.
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }
        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }
        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
/**
 * Publishes address-resolution latency and request-count meters for every
 * completed (non-inflight) resolution attempt. No-op when the map is empty
 * or the AddressResolutions category is disabled.
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {
    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.size() == 0
        || !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
        return;
    }
    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
        : addressResolutionStatisticsMap.values()) {
        // Skip resolutions that are still running - no end time to measure.
        if (addressResolutionStatistics.isInflightRequest() ||
            addressResolutionStatistics.getEndTimeUTC() == null) {
            continue;
        }
        Tags addressResolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                addressResolutionStatistics.getTargetEndpoint(),
                addressResolutionStatistics.isForceRefresh(),
                addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
            )
        );
        Duration latency = Duration.between(
            addressResolutionStatistics.getStartTimeUTC(),
            addressResolutionStatistics.getEndTimeUTC());
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(addressResolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(latency);
        }
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(addressResolutionTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
    }
}
}
// Completion recorder publishing RNTBD (direct TCP transport) request and
// channel metrics for a single endpoint into the provided registry.
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    // Endpoint-scoped metric tags (from the endpoint's client metric tag).
    private final Tags tags;
    private final MeterRegistry registry;
// Registers long-lived gauges/function-counters for the endpoint's request,
// endpoint and channel statistics, each gated on its metric category and the
// per-meter enablement options.
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
    this.tags = Tags.of(endpoint.clientMetricTag());
    this.client = client;
    this.registry = registry;
    if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
        CosmosMeterOptions options = client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
        if (options.isEnabled()) {
            // Gauges keep a reference to the endpoint and sample it lazily.
            Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                .description("RNTBD concurrent requests (executing or queued request count)")
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
        if (options.isEnabled()) {
            Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                .description("RNTBD queued request count")
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
    }
    if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
        CosmosMeterOptions options = client
            .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
        if (options.isEnabled()) {
            Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                .description("RNTBD endpoint count")
                .register(registry);
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
        if (options.isEnabled()) {
            FunctionCounter.builder(
                options.getMeterName().toString(),
                client,
                RntbdTransportClient::endpointEvictionCount)
                .description("RNTBD endpoint eviction count")
                .register(registry);
        }
    }
    if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
        CosmosMeterOptions options = client
            .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
        if (options.isEnabled()) {
            // Durable metrics survive endpoint re-creation.
            FunctionCounter.builder(
                options.getMeterName().toString(),
                endpoint.durableEndpointMetrics(),
                RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                .description("RNTBD acquired channel count")
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
        if (options.isEnabled()) {
            FunctionCounter.builder(
                options.getMeterName().toString(),
                endpoint.durableEndpointMetrics(),
                RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                .description("RNTBD closed channel count")
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
        options = client
            .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
        if (options.isEnabled()) {
            Gauge.builder(
                options.getMeterName().toString(),
                endpoint.durableEndpointMetrics(),
                RntbdDurableEndpointMetrics::channelsAvailableMetric)
                .description("RNTBD available channel count")
                .tags(getEffectiveTags(tags, options))
                .register(registry);
        }
    }
}
/**
 * Finalizes metric collection for a completed RNTBD request.
 *
 * When the DirectRequests metric category is enabled, latency timers (overall,
 * failed, successful) are resolved from the client's meter options and handed to
 * {@code requestRecord.stop(...)}, and request/response payload sizes are recorded.
 * Otherwise the record is stopped without any meters.
 *
 * @param requestRecord the completed RNTBD request record to finalize
 */
public void markComplete(RntbdRequestRecord requestRecord) {
    if (!this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
        // Direct-request metrics disabled - stop the record without meters.
        requestRecord.stop();
        return;
    }

    // Timers stay null when the corresponding meter is disabled; requestRecord.stop(...)
    // must tolerate null timers (same contract as before this refactoring).
    Timer requests = createLatencyTimer(
        this.client.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY),
        "RNTBD request latency");
    Timer requestsFailed = createLatencyTimer(
        this.client.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED),
        "RNTBD failed request latency");
    Timer requestsSuccess = createLatencyTimer(
        this.client.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS),
        "RNTBD successful request latency");

    requestRecord.stop(
        requests,
        requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);

    recordPayloadSize(
        this.client.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST),
        "RNTBD request size (bytes)",
        requestRecord.requestLength());
    recordPayloadSize(
        this.client.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE),
        "RNTBD response size (bytes)",
        requestRecord.responseLength());
}

// Builds (or looks up) a request-latency timer for the given meter options.
// Returns null when the meter is disabled, mirroring the previous inline logic.
private Timer createLatencyTimer(CosmosMeterOptions options, String description) {
    if (!options.isEnabled()) {
        return null;
    }
    return Timer
        .builder(options.getMeterName().toString())
        .description(description)
        .maximumExpectedValue(Duration.ofSeconds(300))
        .publishPercentiles(options.getPercentiles())
        .publishPercentileHistogram(options.isHistogramPublishingEnabled())
        .tags(getEffectiveTags(this.tags, options))
        .register(this.registry);
}

// Records a request/response payload size into a byte-unit distribution summary
// when the meter is enabled; no-op otherwise.
private void recordPayloadSize(CosmosMeterOptions options, String description, double lengthInBytes) {
    if (!options.isEnabled()) {
        return;
    }
    DistributionSummary payloadSize = DistributionSummary.builder(options.getMeterName().toString())
        .description(description)
        .baseUnit("bytes")
        .tags(getEffectiveTags(this.tags, options))
        .maximumExpectedValue(16_000_000d)
        .publishPercentileHistogram(false)
        .publishPercentiles()
        .register(this.registry);
    payloadSize.record(lengthInBytes);
}
}
/**
 * Immutable snapshot of the cached composite-registry descendant check:
 * a boolean outcome plus the instant at which the cached outcome expires.
 */
static class DescendantValidationResult {
    private final Instant validUntil;
    private final boolean hasActualRegistry;

    public DescendantValidationResult(Instant expiration, boolean result) {
        this.validUntil = expiration;
        this.hasActualRegistry = result;
    }

    /** Returns the instant after which this cached result must be recomputed. */
    public Instant getExpiration() {
        return this.validUntil;
    }

    /** Returns the cached validation outcome. */
    public boolean getResult() {
        return this.hasActualRegistry;
    }
}
} |
10 seconds is fine, we can make it configurable if need arises. | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | nowSnapshot.plus(10, ChronoUnit.SECONDS), | private static boolean hasAnyActualMeterRegistry() {
Instant nowSnapshot = Instant.now();
DescendantValidationResult snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
synchronized (lockObject) {
snapshot = lastDescendantValidation;
if (nowSnapshot.isBefore(snapshot.getExpiration())) {
return snapshot.getResult();
}
DescendantValidationResult newResult = new DescendantValidationResult(
nowSnapshot.plus(10, ChronoUnit.SECONDS),
hasAnyActualMeterRegistryCore(compositeRegistry, 1)
);
lastDescendantValidation = newResult;
return newResult.getResult();
}
} | class ClientTelemetryMetrics {
// Diagnostics logger shared by this class.
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
// Bridge accessor for internal CosmosAsyncClient state (meter options, tags, categories).
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
// Bridge accessor for internal CosmosDiagnostics state (request statistics, feed diagnostics).
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Escaper applied to tag values via escape(); constructed with "_-/." and 'false'
// (presumably extra safe characters / no plus-for-space - see PercentEscaper docs).
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
// Composite registry all meters register with; replaced by a fresh instance in remove()
// once the last attached registry is detached.
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
// Per-registry ref-count so balanced add()/remove() calls attach/detach only once.
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
// Meter options for the system CPU / free-memory meters; assigned in add().
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
// Time-bounded cache of the "does any concrete (non-composite) registry exist" check.
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
// Guards recomputation of lastDescendantValidation.
private static final Object lockObject = new Object();
// Constant tag identifying gateway query-plan requests (DocumentCollection/QueryPlan).
private static final Tag QUERYPLAN_TAG = Tag.of(
TagName.RequestOperationType.toString(),
ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
/**
 * Renders the full stack trace of {@code throwable} as a String.
 *
 * @param throwable the throwable whose stack trace is rendered
 * @return the stack trace text exactly as printStackTrace would emit it
 */
private static String convertStackTraceToString(Throwable throwable)
{
    // StringWriter/PrintWriter are purely in-memory writers, so no resource
    // management (try-with-resources) or IOException plumbing is required -
    // the previous version wrapped an IOException that can never occur here.
    StringWriter stackTraceWriter = new StringWriter();
    PrintWriter printWriter = new PrintWriter(stackTraceWriter);
    throwable.printStackTrace(printWriter);
    printWriter.flush();
    return stackTraceWriter.toString();
}
/**
 * Creates a new, empty CompositeMeterRegistry. When TRACE logging is enabled,
 * every meter registration is logged together with the call stack that added it.
 */
private static CompositeMeterRegistry createFreshRegistry() {
    CompositeMeterRegistry freshRegistry = new CompositeMeterRegistry();
    if (!logger.isTraceEnabled()) {
        return freshRegistry;
    }
    // Trace-level diagnostics: record who registered each meter.
    freshRegistry.config().onMeterAdded(meter -> logger.trace(
        "Meter '{}' added. Callstack: {}",
        meter.getId().getName(),
        convertStackTraceToString(new IllegalStateException("Dummy"))));
    return freshRegistry;
}
/**
 * Records system-wide CPU and free-memory readings as distribution summaries.
 * No-op while no meter registry is attached or before add() initialized the
 * CPU/memory meter options.
 *
 * @param averageSystemCpuUsage average system CPU load (recorded with base unit "%")
 * @param freeMemoryAvailableInMB free memory (recorded with base unit "MB")
 */
public static void recordSystemUsage(
float averageSystemCpuUsage,
float freeMemoryAvailableInMB
) {
// Nothing to do without an attached registry or initialized options.
if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
return;
}
if (cpuOptions.isEnabled()) {
DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
.builder(CosmosMetricName.SYSTEM_CPU.toString())
.baseUnit("%")
.description("Avg. System CPU load")
.maximumExpectedValue(100d)
.publishPercentiles(cpuOptions.getPercentiles())
.publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
}
if (memoryOptions.isEnabled()) {
DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
.builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
.baseUnit("MB")
.description("Free memory available")
.publishPercentiles()
.publishPercentileHistogram(false)
.register(compositeRegistry);
freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
}
}
/**
 * Records operation-level metrics for a completed operation, extracting every
 * metric dimension (status, item counts, identifiers, consistency, RU charge,
 * latency) from the diagnostics context before delegating to the full overload.
 */
public static void recordOperation(
CosmosAsyncClient client,
CosmosDiagnosticsContext diagnosticsContext
) {
    final CosmosDiagnosticsContext ctx = diagnosticsContext;
    recordOperation(
        client,
        ctx,
        ctx.getStatusCode(),
        ctx.getSubStatusCode(),
        ctx.getMaxItemCount(),
        ctx.getActualItemCount(),
        ctx.getContainerName(),
        ctx.getDatabaseName(),
        ctx.getOperationType(),
        ctx.isPointOperation(),
        ctx.getResourceType(),
        ctx.getEffectiveConsistencyLevel(),
        ctx.getOperationId(),
        ctx.getTotalRequestCharge(),
        ctx.getDuration()
    );
}
/**
 * Depth-first check whether the composite registry tree contains at least one
 * concrete (non-composite) registry. Fails open (returns true) beyond depth 100
 * to guard against pathological or cyclic nesting.
 */
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
    if (depth > 100) {
        // Fail open rather than recursing indefinitely.
        return true;
    }

    for (MeterRegistry childRegistry : compositeMeterRegistry.getRegistries()) {
        if (!(childRegistry instanceof CompositeMeterRegistry)) {
            // Found a concrete registry.
            return true;
        }

        if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry) childRegistry, depth + 1)) {
            return true;
        }
    }

    return false;
}
/**
 * Records all operation-level meters for one completed operation: builds the
 * operation tag set, then delegates to an OperationMetricProducer that emits
 * counters, RU-charge/latency distributions and nested per-request meters.
 * No-op when no concrete meter registry is attached or telemetry is disabled.
 *
 * NOTE(review): enablement is read via shouldEnableEmptyPageDiagnostics - looks
 * like this accessor doubles as the client-telemetry-metrics flag; confirm.
 */
private static void recordOperation(
CosmosAsyncClient client,
CosmosDiagnosticsContext diagnosticsContext,
int statusCode,
int subStatusCode,
Integer maxItemCount,
Integer actualItemCount,
String containerId,
String databaseId,
String operationType,
boolean isPointOperation,
String resourceType,
ConsistencyLevel consistencyLevel,
String operationId,
float requestCharge,
Duration latency
) {
boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
return;
}
Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
String accountTagValue = clientAccessor.getAccountTagValue(client);
EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);
// Contacted regions are only collected when the OperationDetails category is on.
Set<String> contactedRegions = Collections.emptySet();
if (metricCategories.contains(MetricCategory.OperationDetails)) {
contactedRegions = diagnosticsContext.getContactedRegionNames();
}
Tags operationTags = createOperationTags(
metricTagNames,
statusCode,
subStatusCode,
containerId,
databaseId,
operationType,
resourceType,
consistencyLevel,
operationId,
isPointOperation,
contactedRegions,
clientCorrelationTag,
accountTagValue
);
OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
// Null item counts are normalized to -1 ("unknown") for the producer.
metricProducer.recordOperation(
client,
requestCharge,
latency,
maxItemCount == null ? -1 : maxItemCount,
actualItemCount == null ? -1: actualItemCount,
diagnosticsContext,
contactedRegions
);
}
/**
 * Creates a completion recorder that publishes RNTBD request metrics for the
 * given transport client / endpoint pair into the shared composite registry.
 */
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
RntbdTransportClient client,
RntbdEndpoint endpoint) {

    RntbdMetricsV2 recorder = new RntbdMetricsV2(compositeRegistry, client, endpoint);
    return recorder;
}
/**
 * Attaches a MeterRegistry (ref-counted): only the first add() for a given
 * registry actually attaches it to the composite registry, captures the
 * CPU/memory meter options and invalidates the cached descendant check.
 */
public static synchronized void add(
MeterRegistry registry,
CosmosMeterOptions cpuOptions,
CosmosMeterOptions memoryOptions) {
// Only act on the 0 -> 1 transition of the per-registry ref-count.
if (registryRefCount
.computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0))
.incrementAndGet() == 1L) {
ClientTelemetryMetrics
.compositeRegistry
.add(registry);
ClientTelemetryMetrics.cpuOptions = cpuOptions;
ClientTelemetryMetrics.memoryOptions = memoryOptions;
// Force hasAnyActualMeterRegistry() to re-evaluate on its next call.
lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
}
/**
 * Detaches a MeterRegistry (ref-counted): once the ref-count for the given
 * registry drops to zero it is cleared, closed and removed from the composite
 * registry; when the composite registry becomes empty it is replaced with a
 * fresh instance, and the cached descendant check is invalidated.
 *
 * An unbalanced remove() (no matching add()) is a no-op.
 */
public static synchronized void remove(MeterRegistry registry) {
    AtomicLong refCount = registryRefCount.get(registry);
    if (refCount == null) {
        // BUG FIX: remove() without a matching add() previously threw an NPE
        // here (registryRefCount.get(...) returned null). Treat it as a no-op.
        return;
    }
    if (refCount.decrementAndGet() == 0L) {
        // Drop the ref-count entry so further unbalanced remove() calls stay
        // no-ops and cannot drive the counter negative; a later add() simply
        // recreates the entry at 1 and re-attaches the registry, as before.
        registryRefCount.remove(registry);
        registry.clear();
        registry.close();
        ClientTelemetryMetrics
            .compositeRegistry
            .remove(registry);
        if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
            ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
        }
        // Force hasAnyActualMeterRegistry() to re-evaluate on its next call.
        lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
    }
}
/**
 * Percent-escapes a raw value with the shared PERCENT_ESCAPER so it is safe to
 * use as a meter tag value.
 *
 * @param value the raw value
 * @return the escaped value
 */
public static String escape(String value) {
return PERCENT_ESCAPER.escape(value);
}
/**
 * Builds the operation-level tag set; only tags enabled via {@code metricTagNames}
 * are emitted. The Container tag has the shape
 * {@code <escaped account>/<escaped database|NONE>/<escaped container|NONE>}.
 */
private static Tags createOperationTags(
    EnumSet<TagName> metricTagNames,
    int statusCode,
    int subStatusCode,
    String containerId,
    String databaseId,
    String operationType,
    String resourceType,
    ConsistencyLevel consistencyLevel,
    String operationId,
    boolean isPointOperation,
    Set<String> contactedRegions,
    Tag clientCorrelationTag,
    String accountTagValue) {
    List<Tag> effectiveTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.ClientCorrelationId)) {
        effectiveTags.add(clientCorrelationTag);
    }
    if (metricTagNames.contains(TagName.Container)) {
        // BUG FIX: '+' binds tighter than '!=' and '?:', so the previous
        // unparenthesized expression parsed as
        //   ((escape(accountTagValue) + "/" + databaseId) != null) ? escape(databaseId) : ...
        // - the String concatenation is never null, so it always evaluated
        // escape(databaseId) (NPE when databaseId is null) and silently dropped
        // the account and container segments. Parentheses restore the intent.
        String containerTagValue =
            escape(accountTagValue)
            + "/"
            + (databaseId != null ? escape(databaseId) : "NONE")
            + "/"
            + (containerId != null ? escape(containerId) : "NONE");
        effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
    }
    if (metricTagNames.contains(TagName.Operation)) {
        // Non-point operations with an operationId get it appended as a third segment.
        String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
            ? resourceType + "/" + operationType + "/" + escape(operationId)
            : resourceType + "/" + operationType;
        effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
    }
    if (metricTagNames.contains(TagName.OperationStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
    }
    if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
        effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
    }
    if (metricTagNames.contains(TagName.ConsistencyLevel)) {
        assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
        effectiveTags.add(Tag.of(
            TagName.ConsistencyLevel.toString(),
            consistencyLevel.toString()
        ));
    }
    if (contactedRegions != null &&
        contactedRegions.size() > 0 &&
        metricTagNames.contains(TagName.RegionName)) {
        effectiveTags.add(Tag.of(
            TagName.RegionName.toString(),
            String.join(", ", contactedRegions)
        ));
    }
    return Tags.of(effectiveTags);
}
/**
 * Returns {@code tags} with all tag names suppressed in the meter options
 * removed. The original Tags instance is returned unchanged when nothing is
 * suppressed.
 */
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
    EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
    if (suppressedTags == null || suppressedTags.isEmpty()) {
        return tags;
    }

    HashSet<String> namesToDrop = new HashSet<>();
    for (TagName suppressed : suppressedTags) {
        namesToDrop.add(suppressed.name());
    }

    List<Tag> retainedTags = new ArrayList<>();
    for (Tag candidate : tags) {
        if (namesToDrop.contains(candidate.getKey())) {
            continue;
        }
        retainedTags.add(candidate);
    }
    return Tags.of(retainedTags);
}
/**
 * Emits all per-operation (and nested per-request) meters for one completed
 * operation, scoped to the operation-level tag set computed by the caller.
 */
private static class OperationMetricProducer {
// Tag dimensions enabled on the client.
private final EnumSet<TagName> metricTagNames;
// Metric categories enabled on the client (controls which meter families are emitted).
private final EnumSet<MetricCategory> metricCategories;
// Operation-level tags shared by every meter this producer emits.
private final Tags operationTags;
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
this.metricCategories = metricCategories;
this.metricTagNames = metricTagNames;
this.operationTags = operationTags;
}
/**
 * Records operation-level meters (calls counter, RU charge, latency, contacted
 * regions, item counts) and then walks every CosmosDiagnostics in the context
 * to record the nested per-request store/gateway/address-resolution and
 * query-plan meters.
 */
public void recordOperation(
CosmosAsyncClient cosmosAsyncClient,
float requestCharge,
Duration latency,
int maxItemCount,
int actualItemCount,
CosmosDiagnosticsContext diagnosticsContext,
Set<String> contactedRegions) {
// Operation call counter.
CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_CALLS);
if (callsOptions.isEnabled()) {
Counter operationsCounter = Counter
.builder(callsOptions.getMeterName().toString())
.baseUnit("calls")
.description("Operation calls")
.tags(getEffectiveTags(operationTags, callsOptions))
.register(compositeRegistry);
operationsCounter.increment();
}
// RU charge distribution - capped at 100,000 RU both as the expected max and on record.
CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
if (requestChargeOptions.isEnabled()) {
DistributionSummary requestChargeMeter = DistributionSummary
.builder(requestChargeOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Operation RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(requestChargeOptions.getPercentiles())
.publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, requestChargeOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
// Optional detail meters: regions contacted + max/actual item counts.
if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
if (regionsOptions.isEnabled()) {
DistributionSummary regionsContactedMeter = DistributionSummary
.builder(regionsOptions.getMeterName().toString())
.baseUnit("Regions contacted")
.description("Operation - regions contacted")
.maximumExpectedValue(100d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, regionsOptions))
.register(compositeRegistry);
if (contactedRegions != null && contactedRegions.size() > 0) {
regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
}
}
this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
}
// End-to-end operation latency timer.
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_LATENCY);
if (latencyOptions.isEnabled()) {
Timer latencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Operation latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, latencyOptions))
.register(compositeRegistry);
latencyMeter.record(latency);
}
// Per-request meters from every diagnostics instance collected for the operation.
for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getResponseStatisticsList());
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getSupplementalResponseStatisticsList());
recordGatewayStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getDuration(),
requestStatistics.getGatewayStatisticsList());
recordAddressResolutionStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getAddressResolutionStatistics());
}
}
// Query-plan diagnostics only exist on feed responses.
FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
.getFeedResponseDiagnostics(diagnostics);
if (feedDiagnostics == null) {
continue;
}
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
feedDiagnostics.getQueryPlanDiagnosticsContext();
recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
}
}
/**
 * Records gateway query-plan request meters (request counter, latency, request
 * timeline). No-op without query-plan diagnostics or when the RequestSummary
 * category is disabled. Meters honoring threshold filtering are only emitted
 * when the context violated its diagnostic thresholds.
 */
private void recordQueryPlanDiagnostics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient cosmosAsyncClient,
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
// Operation tags plus the constant DocumentCollection/QueryPlan request tag.
Tags requestTags = operationTags.and(
createQueryPlanTags(metricTagNames)
);
CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (requestsOptions.isEnabled() &&
(!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(requestsOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, requestsOptions))
.register(compositeRegistry);
requestCounter.increment();
}
Duration latency = queryPlanDiagnostics.getDuration();
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
// Per-event timeline timers for the query-plan request.
recordRequestTimeline(
ctx,
cosmosAsyncClient,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
/**
 * Records request and response payload sizes (bytes) as distribution summaries,
 * each gated by its own meter options and optional diagnostic-threshold filter.
 */
private void recordRequestPayloadSizes(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
int requestPayloadSizeInBytes,
int responsePayloadSizeInBytes
) {
CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
if (reqSizeOptions.isEnabled() &&
(!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary requestPayloadSizeMeter = DistributionSummary
.builder(reqSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Request payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, reqSizeOptions))
.register(compositeRegistry);
requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
}
CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
if (rspSizeOptions.isEnabled() &&
(!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary responsePayloadSizeMeter = DistributionSummary
.builder(rspSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Response payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, rspSizeOptions))
.register(compositeRegistry);
responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
}
}
/**
 * Records the requested max item count and the actual response item count as
 * distribution summaries. Only emitted when a positive maxItemCount is known
 * and the OperationDetails category is enabled; values are clamped to
 * [0, 100,000] on record.
 */
private void recordItemCounts(
CosmosAsyncClient client,
int maxItemCount,
int actualItemCount
) {
if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
if (maxItemCountOptions.isEnabled()) {
DistributionSummary maxItemCountMeter = DistributionSummary
.builder(maxItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Request max. item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, maxItemCountOptions))
.register(compositeRegistry);
maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
}
CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
if (actualItemCountOptions.isEnabled()) {
DistributionSummary actualItemCountMeter = DistributionSummary
.builder(actualItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Response actual item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, actualItemCountOptions))
.register(compositeRegistry);
actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
}
}
}
/**
 * Builds the per-request tag set (partition key range, status codes, operation
 * type, region, service endpoint/address, partition/replica id) from the
 * enabled tag names. Missing values are emitted as "NONE".
 */
private Tags createRequestTags(
EnumSet<TagName> metricTagNames,
String pkRangeId,
int statusCode,
int subStatusCode,
String resourceType,
String operationType,
String regionName,
String serviceEndpoint,
String serviceAddress
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
effectiveTags.add(Tag.of(
TagName.PartitionKeyRangeId.toString(),
Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
}
if (metricTagNames.contains(TagName.RequestStatusCode)) {
effectiveTags.add(Tag.of(
TagName.RequestStatusCode.toString(),
statusCode + "/" + subStatusCode));
}
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(Tag.of(
TagName.RequestOperationType.toString(),
resourceType + "/" + operationType));
}
if (metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
regionName != null ? regionName : "NONE"));
}
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
if (metricTagNames.contains(TagName.ServiceAddress)) {
effectiveTags.add(Tag.of(
TagName.ServiceAddress.toString(),
effectiveServiceAddress));
}
// Partition and replica ids are both parsed from the escaped service address;
// only compute the split when at least one of the two tags is enabled.
boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
if (containsPartitionId || containsReplicaId) {
String partitionId = "NONE";
String replicaId = "NONE";
String[] partitionAndReplicaId =
StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
if (partitionAndReplicaId.length == 2) {
partitionId = partitionAndReplicaId[0];
replicaId = partitionAndReplicaId[1];
}
if (containsPartitionId) {
effectiveTags.add(Tag.of(
TagName.PartitionId.toString(),
partitionId));
}
if (containsReplicaId) {
effectiveTags.add(Tag.of(
TagName.ReplicaId.toString(),
replicaId));
}
}
return Tags.of(effectiveTags);
}
/**
 * Returns the tag set identifying a gateway query-plan request: just the
 * constant DocumentCollection/QueryPlan tag when the RequestOperationType tag
 * is enabled, otherwise an empty tag set.
 */
private Tags createQueryPlanTags(
EnumSet<TagName> metricTagNames
) {
    List<Tag> queryPlanTags = new ArrayList<>();
    if (metricTagNames.contains(TagName.RequestOperationType)) {
        queryPlanTags.add(QUERYPLAN_TAG);
    }
    return Tags.of(queryPlanTags);
}
/**
 * Builds the tag set for address-resolution meters: the (escaped) service
 * endpoint plus the two force-refresh flags, each emitted only when the
 * corresponding tag name is enabled.
 */
private Tags createAddressResolutionTags(
EnumSet<TagName> metricTagNames,
String serviceEndpoint,
boolean isForceRefresh,
boolean isForceCollectionRoutingMapRefresh
) {
    List<Tag> resolutionTags = new ArrayList<>();

    if (metricTagNames.contains(TagName.ServiceEndpoint)) {
        String endpointValue = (serviceEndpoint != null) ? escape(serviceEndpoint) : "NONE";
        resolutionTags.add(Tag.of(TagName.ServiceEndpoint.toString(), endpointValue));
    }

    if (metricTagNames.contains(TagName.IsForceRefresh)) {
        String forceRefreshValue = isForceRefresh ? "True" : "False";
        resolutionTags.add(Tag.of(TagName.IsForceRefresh.toString(), forceRefreshValue));
    }

    if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
        String forceMapRefreshValue = isForceCollectionRoutingMapRefresh ? "True" : "False";
        resolutionTags.add(Tag.of(
            TagName.IsForceCollectionRoutingMapRefresh.toString(),
            forceMapRefreshValue));
    }

    return Tags.of(resolutionTags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
/**
 * Records one timer per non-zero request-timeline event, named
 * "<meter name>.<escaped event name>". No-op when there is no timeline, the
 * RequestDetails category is off, the meter is disabled, or threshold
 * filtering is on and the context did not violate its thresholds.
 */
private void recordRequestTimeline(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
CosmosMetricName name,
RequestTimeline requestTimeline,
Tags requestTags) {
if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
return;
}
CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
client,
name);
if (!timelineOptions.isEnabled() ||
(timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
return;
}
for (RequestTimeline.Event event : requestTimeline) {
Duration duration = event.getDuration();
// NOTE(review): '==' compares against the Duration.ZERO singleton by reference;
// a computed zero-length Duration would NOT be skipped here. Presumably the
// timeline uses the constant for empty events - confirm, else use isZero().
if (duration == null || duration == Duration.ZERO) {
continue;
}
Timer eventMeter = Timer
.builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
.description("Request timeline (" + event.getName() + ")")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(timelineOptions.getPercentiles())
.publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, timelineOptions))
.register(compositeRegistry);
eventMeter.record(duration);
}
}
private void recordStoreResponseStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNames,
storeResponseDiagnostics.getPartitionKeyRangeId(),
storeResponseDiagnostics.getStatusCode(),
storeResponseDiagnostics.getSubStatusCode(),
responseStatistics.getRequestResourceType().toString(),
responseStatistics.getRequestOperationType().toString(),
responseStatistics.getRegionName(),
storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
);
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
if (beLatencyOptions.isEnabled() &&
(!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary backendRequestLatencyMeter = DistributionSummary
.builder(beLatencyOptions.getMeterName().toString())
.baseUnit("ms")
.description("Backend service latency")
.maximumExpectedValue(6_000d)
.publishPercentiles(beLatencyOptions.getPercentiles())
.publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, beLatencyOptions))
.register(compositeRegistry);
backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
}
}
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = storeResponseDiagnostics.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("RNTBD Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Duration latency = responseStatistics.getDuration();
if (latency != null) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("RNTBD Request latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("RNTBD requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
storeResponseDiagnostics.getRequestTimeline(), requestTags);
}
recordRequestPayloadSizes(
ctx,
client,
storeResponseDiagnostics.getRequestPayloadLength(),
storeResponseDiagnostics.getResponsePayloadLength()
);
recordRntbdEndpointStatistics(
client,
storeResponseDiagnostics.getRntbdEndpointStatistics(),
requestTags);
}
}
/**
 * Records per-request metrics for gateway-mode calls: request count, RU charge,
 * latency and - delegated to {@code recordRequestTimeline} - the request timeline.
 * <p>
 * Each meter is only recorded when it is enabled and either threshold-based
 * filtering is disabled for it or the diagnostics thresholds were violated.
 *
 * @param ctx the diagnostics context of the operation - used for threshold-based
 *            filtering per meter
 * @param client the client from which the per-meter options are resolved
 * @param latency the duration of the client-side request statistics; may be
 *                {@code null}, in which case no latency sample is recorded
 * @param gatewayStatisticsList the gateway statistics captured for the operation;
 *                              may be {@code null} or empty (no-op then)
 */
private void recordGatewayStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Duration latency,
    List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {

    // Nothing captured, or request-summary metrics are disabled -> no-op.
    if (gatewayStatisticsList == null
        || gatewayStatisticsList.isEmpty()
        || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
        return;
    }

    // Direct-mode-only tags (region, service endpoint/address, partition/replica id)
    // do not apply to gateway requests - strip them from the effective tag set.
    EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
    metricTagNamesForGateway.remove(TagName.RegionName);
    metricTagNamesForGateway.remove(TagName.ServiceAddress);
    metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
    metricTagNamesForGateway.remove(TagName.PartitionId);
    metricTagNamesForGateway.remove(TagName.ReplicaId);

    for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
        Tags requestTags = operationTags.and(
            createRequestTags(
                metricTagNamesForGateway,
                gatewayStats.getPartitionKeyRangeId(),
                gatewayStats.getStatusCode(),
                gatewayStats.getSubStatusCode(),
                gatewayStats.getResourceType().toString(),
                gatewayStats.getOperationType().toString(),
                null,
                null,
                null)
        );

        // Request count
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Gateway requests")
                .tags(getEffectiveTags(requestTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }

        // Request charge - capped at 100,000 RU to keep the distribution bounded.
        CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
        if (ruOptions.isEnabled() &&
            (!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            double requestCharge = gatewayStats.getRequestCharge();
            DistributionSummary requestChargeMeter = DistributionSummary
                .builder(ruOptions.getMeterName().toString())
                .baseUnit("RU (request unit)")
                .description("Gateway Request RU charge")
                .maximumExpectedValue(100_000d)
                .publishPercentiles(ruOptions.getPercentiles())
                .publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(requestTags, ruOptions))
                .register(compositeRegistry);
            requestChargeMeter.record(Math.min(requestCharge, 100_000d));
        }

        // Latency - only recorded when a duration was captured.
        if (latency != null) {
            CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
                client,
                CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
            if (latencyOptions.isEnabled() &&
                (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

                Timer requestLatencyMeter = Timer
                    .builder(latencyOptions.getMeterName().toString())
                    .description("Gateway Request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(latencyOptions.getPercentiles())
                    .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(requestTags, latencyOptions))
                    .register(compositeRegistry);
                requestLatencyMeter.record(latency);
            }
        }

        recordRequestTimeline(
            ctx,
            client,
            CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
            gatewayStats.getRequestTimeline(), requestTags);
    }
}
/**
 * Records latency and request-count metrics for completed address-resolution calls.
 * In-flight resolutions (or ones without an end timestamp) are skipped because
 * their latency cannot be computed yet.
 *
 * @param ctx the diagnostics context of the operation - used for threshold-based
 *            filtering per meter
 * @param client the client from which the per-meter options are resolved
 * @param addressResolutionStatisticsMap address-resolution statistics keyed by
 *                                       activity; may be {@code null} or empty (no-op then)
 */
private void recordAddressResolutionStatistics(
    CosmosDiagnosticsContext ctx,
    CosmosAsyncClient client,
    Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {

    // Nothing captured, or address-resolution metrics are disabled -> no-op.
    if (addressResolutionStatisticsMap == null
        || addressResolutionStatisticsMap.isEmpty()
        || !this.metricCategories.contains(MetricCategory.AddressResolutions)) {
        return;
    }

    for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
        : addressResolutionStatisticsMap.values()) {

        // Latency is only well-defined for finished resolutions.
        if (addressResolutionStatistics.isInflightRequest() ||
            addressResolutionStatistics.getEndTimeUTC() == null) {
            continue;
        }

        Tags addressResolutionTags = operationTags.and(
            createAddressResolutionTags(
                metricTagNames,
                addressResolutionStatistics.getTargetEndpoint(),
                addressResolutionStatistics.isForceRefresh(),
                addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
            )
        );

        Duration latency = Duration.between(
            addressResolutionStatistics.getStartTimeUTC(),
            addressResolutionStatistics.getEndTimeUTC());

        // Resolution latency
        CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
        if (latencyOptions.isEnabled() &&
            (!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Timer addressResolutionLatencyMeter = Timer
                .builder(latencyOptions.getMeterName().toString())
                .description("Address resolution latency")
                .maximumExpectedValue(Duration.ofSeconds(6))
                .publishPercentiles(latencyOptions.getPercentiles())
                .publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
                .tags(getEffectiveTags(addressResolutionTags, latencyOptions))
                .register(compositeRegistry);
            addressResolutionLatencyMeter.record(latency);
        }

        // Resolution request count
        CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
            client,
            CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
        if (reqOptions.isEnabled() &&
            (!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {

            Counter requestCounter = Counter
                .builder(reqOptions.getMeterName().toString())
                .baseUnit("requests")
                .description("Address resolution requests")
                .tags(getEffectiveTags(addressResolutionTags, reqOptions))
                .register(compositeRegistry);
            requestCounter.increment();
        }
    }
}
}
/**
 * Micrometer-based completion recorder for RNTBD (direct-mode) transport metrics.
 * <p>
 * The constructor registers the endpoint/client-scoped gauges and counters
 * (concurrent/queued requests, endpoint counts, channel counts) up front;
 * {@link #markComplete(RntbdRequestRecord)} records the per-request meters
 * (latency, request/response sizes) when the request finishes.
 */
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
    private final RntbdTransportClient client;
    private final Tags tags;
    private final MeterRegistry registry;

    private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
        this.tags = Tags.of(endpoint.clientMetricTag());
        this.client = client;
        this.registry = registry;

        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            // Gauges sample the endpoint lazily on scrape - no per-request bookkeeping.
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
                     .description("RNTBD concurrent requests (executing or queued request count)")
                     .tags(getEffectiveTags(this.tags, options))
                     .register(this.registry);
            }

            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
                     .description("RNTBD queued request count")
                     .tags(getEffectiveTags(this.tags, options))
                     .register(this.registry);
            }
        }

        if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
            // Endpoint meters are client-wide; no endpoint tags attached.
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
                     .description("RNTBD endpoint count")
                     .register(this.registry);
            }

            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    client,
                    RntbdTransportClient::endpointEvictionCount)
                    .description("RNTBD endpoint eviction count")
                    .register(this.registry);
            }
        }

        if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
            // Channel counters are read from the durable endpoint metrics so they
            // survive endpoint re-creation.
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
                    .description("RNTBD acquired channel count")
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
            if (options.isEnabled()) {
                FunctionCounter.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
                    .description("RNTBD closed channel count")
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
            if (options.isEnabled()) {
                Gauge.builder(
                    options.getMeterName().toString(),
                    endpoint.durableEndpointMetrics(),
                    RntbdDurableEndpointMetrics::channelsAvailableMetric)
                    .description("RNTBD available channel count")
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }
        }
    }

    /**
     * Stops the request record's timer(s) and records per-request meters.
     * When the DirectRequests category is disabled the record is still stopped,
     * just without any meters attached.
     *
     * @param requestRecord the completed RNTBD request record
     */
    @Override
    public void markComplete(RntbdRequestRecord requestRecord) {
        if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
            Timer requests = null;
            Timer requestsSuccess = null;
            Timer requestsFailed = null;

            // Overall request latency
            CosmosMeterOptions options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
            if (options.isEnabled()) {
                requests = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            // Failed request latency
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
            if (options.isEnabled()) {
                requestsFailed = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD failed request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            // Successful request latency
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
            if (options.isEnabled()) {
                requestsSuccess = Timer
                    .builder(options.getMeterName().toString())
                    .description("RNTBD successful request latency")
                    .maximumExpectedValue(Duration.ofSeconds(300))
                    .publishPercentiles(options.getPercentiles())
                    .publishPercentileHistogram(options.isHistogramPublishingEnabled())
                    .tags(getEffectiveTags(this.tags, options))
                    .register(this.registry);
            }

            // Route the outcome-specific timer based on how the record completed.
            requestRecord.stop(
                requests,
                requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);

            // Request payload size (bytes)
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
            if (options.isEnabled()) {
                DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD request size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(this.tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(this.registry);
                requestSize.record(requestRecord.requestLength());
            }

            // Response payload size (bytes)
            options = this.client
                .getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
            if (options.isEnabled()) {
                DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
                    .description("RNTBD response size (bytes)")
                    .baseUnit("bytes")
                    .tags(getEffectiveTags(this.tags, options))
                    .maximumExpectedValue(16_000_000d)
                    .publishPercentileHistogram(false)
                    .publishPercentiles()
                    .register(this.registry);
                responseSize.record(requestRecord.responseLength());
            }
        } else {
            requestRecord.stop();
        }
    }
}
/**
 * Immutable pair of a cached validation outcome and the instant at which that
 * cached outcome expires and must be re-evaluated.
 */
static class DescendantValidationResult {
    private final Instant expiration;
    private final boolean result;

    /**
     * @param validUntil the instant this cached result expires
     * @param isValid the cached validation outcome
     */
    public DescendantValidationResult(Instant validUntil, boolean isValid) {
        this.expiration = validUntil;
        this.result = isValid;
    }

    /** @return the cached validation outcome */
    public boolean getResult() {
        return result;
    }

    /** @return the instant this cached result expires */
    public Instant getExpiration() {
        return expiration;
    }
}
} | class ClientTelemetryMetrics {
private static final Logger logger = LoggerFactory.getLogger(ClientTelemetryMetrics.class);
private static final ImplementationBridgeHelpers.CosmosAsyncClientHelper.CosmosAsyncClientAccessor clientAccessor =
ImplementationBridgeHelpers.CosmosAsyncClientHelper.getCosmosAsyncClientAccessor();
private static final
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private static final PercentEscaper PERCENT_ESCAPER = new PercentEscaper("_-/.", false);
private static CompositeMeterRegistry compositeRegistry = createFreshRegistry();
private static final ConcurrentHashMap<MeterRegistry, AtomicLong> registryRefCount = new ConcurrentHashMap<>();
private static CosmosMeterOptions cpuOptions;
private static CosmosMeterOptions memoryOptions;
private static volatile DescendantValidationResult lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
private static final Object lockObject = new Object();
private static final Tag QUERYPLAN_TAG = Tag.of(
TagName.RequestOperationType.toString(),
ResourceType.DocumentCollection + "/" + OperationType.QueryPlan);
private static String convertStackTraceToString(Throwable throwable)
{
try (StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw))
{
throwable.printStackTrace(pw);
return sw.toString();
}
catch (IOException ioe)
{
throw new IllegalStateException(ioe);
}
}
private static CompositeMeterRegistry createFreshRegistry() {
CompositeMeterRegistry registry = new CompositeMeterRegistry();
if (logger.isTraceEnabled()) {
registry.config().onMeterAdded(
(meter) -> logger.trace(
"Meter '{}' added. Callstack: {}",
meter.getId().getName(),
convertStackTraceToString(new IllegalStateException("Dummy")))
);
}
return registry;
}
public static void recordSystemUsage(
float averageSystemCpuUsage,
float freeMemoryAvailableInMB
) {
if (compositeRegistry.getRegistries().isEmpty() || cpuOptions == null || memoryOptions == null) {
return;
}
if (cpuOptions.isEnabled()) {
DistributionSummary averageSystemCpuUsageMeter = DistributionSummary
.builder(CosmosMetricName.SYSTEM_CPU.toString())
.baseUnit("%")
.description("Avg. System CPU load")
.maximumExpectedValue(100d)
.publishPercentiles(cpuOptions.getPercentiles())
.publishPercentileHistogram(cpuOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
averageSystemCpuUsageMeter.record(averageSystemCpuUsage);
}
if (memoryOptions.isEnabled()) {
DistributionSummary freeMemoryAvailableInMBMeter = DistributionSummary
.builder(CosmosMetricName.SYSTEM_MEMORY_FREE.toString())
.baseUnit("MB")
.description("Free memory available")
.publishPercentiles()
.publishPercentileHistogram(false)
.register(compositeRegistry);
freeMemoryAvailableInMBMeter.record(freeMemoryAvailableInMB);
}
}
public static void recordOperation(
CosmosAsyncClient client,
CosmosDiagnosticsContext diagnosticsContext
) {
recordOperation(
client,
diagnosticsContext,
diagnosticsContext.getStatusCode(),
diagnosticsContext.getSubStatusCode(),
diagnosticsContext.getMaxItemCount(),
diagnosticsContext.getActualItemCount(),
diagnosticsContext.getContainerName(),
diagnosticsContext.getDatabaseName(),
diagnosticsContext.getOperationType(),
diagnosticsContext.isPointOperation(),
diagnosticsContext.getResourceType(),
diagnosticsContext.getEffectiveConsistencyLevel(),
diagnosticsContext.getOperationId(),
diagnosticsContext.getTotalRequestCharge(),
diagnosticsContext.getDuration()
);
}
private static boolean hasAnyActualMeterRegistryCore(CompositeMeterRegistry compositeMeterRegistry, int depth) {
if (depth > 100) {
return true;
}
for (MeterRegistry registry : compositeMeterRegistry.getRegistries()) {
if (registry instanceof CompositeMeterRegistry) {
if (hasAnyActualMeterRegistryCore((CompositeMeterRegistry)registry, depth + 1)) {
return true;
}
} else {
return true;
}
}
return false;
}
private static void recordOperation(
CosmosAsyncClient client,
CosmosDiagnosticsContext diagnosticsContext,
int statusCode,
int subStatusCode,
Integer maxItemCount,
Integer actualItemCount,
String containerId,
String databaseId,
String operationType,
boolean isPointOperation,
String resourceType,
ConsistencyLevel consistencyLevel,
String operationId,
float requestCharge,
Duration latency
) {
boolean isClientTelemetryMetricsEnabled = clientAccessor.shouldEnableEmptyPageDiagnostics(client);
if (!hasAnyActualMeterRegistry() || !isClientTelemetryMetricsEnabled) {
return;
}
Tag clientCorrelationTag = clientAccessor.getClientCorrelationTag(client);
String accountTagValue = clientAccessor.getAccountTagValue(client);
EnumSet<TagName> metricTagNames = clientAccessor.getMetricTagNames(client);
EnumSet<MetricCategory> metricCategories = clientAccessor.getMetricCategories(client);
Set<String> contactedRegions = Collections.emptySet();
if (metricCategories.contains(MetricCategory.OperationDetails)) {
contactedRegions = diagnosticsContext.getContactedRegionNames();
}
Tags operationTags = createOperationTags(
metricTagNames,
statusCode,
subStatusCode,
containerId,
databaseId,
operationType,
resourceType,
consistencyLevel,
operationId,
isPointOperation,
contactedRegions,
clientCorrelationTag,
accountTagValue
);
OperationMetricProducer metricProducer = new OperationMetricProducer(metricCategories, metricTagNames, operationTags);
metricProducer.recordOperation(
client,
requestCharge,
latency,
maxItemCount == null ? -1 : maxItemCount,
actualItemCount == null ? -1: actualItemCount,
diagnosticsContext,
contactedRegions
);
}
public static RntbdMetricsCompletionRecorder createRntbdMetrics(
RntbdTransportClient client,
RntbdEndpoint endpoint) {
return new RntbdMetricsV2(compositeRegistry, client, endpoint);
}
public static synchronized void add(
MeterRegistry registry,
CosmosMeterOptions cpuOptions,
CosmosMeterOptions memoryOptions) {
if (registryRefCount
.computeIfAbsent(registry, (meterRegistry) -> new AtomicLong(0))
.incrementAndGet() == 1L) {
ClientTelemetryMetrics
.compositeRegistry
.add(registry);
ClientTelemetryMetrics.cpuOptions = cpuOptions;
ClientTelemetryMetrics.memoryOptions = memoryOptions;
lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
}
public static synchronized void remove(MeterRegistry registry) {
if (registryRefCount
.get(registry)
.decrementAndGet() == 0L) {
registry.clear();
registry.close();
ClientTelemetryMetrics
.compositeRegistry
.remove(registry);
if (ClientTelemetryMetrics.compositeRegistry.getRegistries().isEmpty()) {
ClientTelemetryMetrics.compositeRegistry = createFreshRegistry();
}
lastDescendantValidation = new DescendantValidationResult(Instant.MIN, true);
}
}
public static String escape(String value) {
return PERCENT_ESCAPER.escape(value);
}
private static Tags createOperationTags(
EnumSet<TagName> metricTagNames,
int statusCode,
int subStatusCode,
String containerId,
String databaseId,
String operationType,
String resourceType,
ConsistencyLevel consistencyLevel,
String operationId,
boolean isPointOperation,
Set<String> contactedRegions,
Tag clientCorrelationTag,
String accountTagValue) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.ClientCorrelationId)) {
effectiveTags.add(clientCorrelationTag);
}
if (metricTagNames.contains(TagName.Container)) {
String containerTagValue =
escape(accountTagValue)
+ "/"
+ databaseId != null ? escape(databaseId) : "NONE"
+ "/"
+ containerId != null ? escape(containerId) : "NONE";
effectiveTags.add(Tag.of(TagName.Container.toString(), containerTagValue));
}
if (metricTagNames.contains(TagName.Operation)) {
String operationTagValue = !isPointOperation && !Strings.isNullOrWhiteSpace(operationId)
? resourceType + "/" + operationType + "/" + escape(operationId)
: resourceType + "/" + operationType;
effectiveTags.add(Tag.of(TagName.Operation.toString(), operationTagValue));
}
if (metricTagNames.contains(TagName.OperationStatusCode)) {
effectiveTags.add(Tag.of(TagName.OperationStatusCode.toString(), String.valueOf(statusCode)));
}
if (metricTagNames.contains(TagName.OperationSubStatusCode)) {
effectiveTags.add(Tag.of(TagName.OperationSubStatusCode.toString(), String.valueOf(subStatusCode)));
}
if (metricTagNames.contains(TagName.ConsistencyLevel)) {
assert consistencyLevel != null : "ConsistencyLevel must never be null here.";
effectiveTags.add(Tag.of(
TagName.ConsistencyLevel.toString(),
consistencyLevel.toString()
));
}
if (contactedRegions != null &&
contactedRegions.size() > 0 &&
metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
String.join(", ", contactedRegions)
));
}
return Tags.of(effectiveTags);
}
private static Tags getEffectiveTags(Tags tags, CosmosMeterOptions meterOptions) {
EnumSet<TagName> suppressedTags = meterOptions.getSuppressedTagNames();
if (suppressedTags == null || suppressedTags.isEmpty()) {
return tags;
}
HashSet<String> suppressedNames = new HashSet<>();
for (TagName t: suppressedTags) {
suppressedNames.add(t.name());
}
List<Tag> result = new ArrayList<>();
for (Tag t: tags) {
if (!suppressedNames.contains(t.getKey())) {
result.add(t);
}
}
return Tags.of(result);
}
private static class OperationMetricProducer {
private final EnumSet<TagName> metricTagNames;
private final EnumSet<MetricCategory> metricCategories;
private final Tags operationTags;
public OperationMetricProducer(EnumSet<MetricCategory> metricCategories, EnumSet<TagName> metricTagNames, Tags operationTags) {
this.metricCategories = metricCategories;
this.metricTagNames = metricTagNames;
this.operationTags = operationTags;
}
public void recordOperation(
CosmosAsyncClient cosmosAsyncClient,
float requestCharge,
Duration latency,
int maxItemCount,
int actualItemCount,
CosmosDiagnosticsContext diagnosticsContext,
Set<String> contactedRegions) {
CosmosMeterOptions callsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_CALLS);
if (callsOptions.isEnabled()) {
Counter operationsCounter = Counter
.builder(callsOptions.getMeterName().toString())
.baseUnit("calls")
.description("Operation calls")
.tags(getEffectiveTags(operationTags, callsOptions))
.register(compositeRegistry);
operationsCounter.increment();
}
CosmosMeterOptions requestChargeOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_REQUEST_CHARGE);
if (requestChargeOptions.isEnabled()) {
DistributionSummary requestChargeMeter = DistributionSummary
.builder(requestChargeOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Operation RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(requestChargeOptions.getPercentiles())
.publishPercentileHistogram(requestChargeOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, requestChargeOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
if (this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions regionsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_DETAILS_REGIONS_CONTACTED);
if (regionsOptions.isEnabled()) {
DistributionSummary regionsContactedMeter = DistributionSummary
.builder(regionsOptions.getMeterName().toString())
.baseUnit("Regions contacted")
.description("Operation - regions contacted")
.maximumExpectedValue(100d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, regionsOptions))
.register(compositeRegistry);
if (contactedRegions != null && contactedRegions.size() > 0) {
regionsContactedMeter.record(Math.min(contactedRegions.size(), 100d));
}
}
this.recordItemCounts(cosmosAsyncClient, maxItemCount, actualItemCount);
}
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.OPERATION_SUMMARY_LATENCY);
if (latencyOptions.isEnabled()) {
Timer latencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Operation latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(operationTags, latencyOptions))
.register(compositeRegistry);
latencyMeter.record(latency);
}
for (CosmosDiagnostics diagnostics: diagnosticsContext.getDiagnostics()) {
Collection<ClientSideRequestStatistics> clientSideRequestStatistics =
diagnosticsAccessor.getClientSideRequestStatistics(diagnostics);
if (clientSideRequestStatistics != null) {
for (ClientSideRequestStatistics requestStatistics : clientSideRequestStatistics) {
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getResponseStatisticsList());
recordStoreResponseStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getSupplementalResponseStatisticsList());
recordGatewayStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getDuration(),
requestStatistics.getGatewayStatisticsList());
recordAddressResolutionStatistics(
diagnosticsContext,
cosmosAsyncClient,
requestStatistics.getAddressResolutionStatistics());
}
}
FeedResponseDiagnostics feedDiagnostics = diagnosticsAccessor
.getFeedResponseDiagnostics(diagnostics);
if (feedDiagnostics == null) {
continue;
}
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics =
feedDiagnostics.getQueryPlanDiagnosticsContext();
recordQueryPlanDiagnostics(diagnosticsContext, cosmosAsyncClient, queryPlanDiagnostics);
}
}
private void recordQueryPlanDiagnostics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient cosmosAsyncClient,
QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnostics
) {
if (queryPlanDiagnostics == null || !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
Tags requestTags = operationTags.and(
createQueryPlanTags(metricTagNames)
);
CosmosMeterOptions requestsOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (requestsOptions.isEnabled() &&
(!requestsOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(requestsOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, requestsOptions))
.register(compositeRegistry);
requestCounter.increment();
}
Duration latency = queryPlanDiagnostics.getDuration();
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
cosmosAsyncClient,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
recordRequestTimeline(
ctx,
cosmosAsyncClient,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
queryPlanDiagnostics.getRequestTimeline(), requestTags);
}
private void recordRequestPayloadSizes(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
int requestPayloadSizeInBytes,
int responsePayloadSizeInBytes
) {
CosmosMeterOptions reqSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_REQUEST);
if (reqSizeOptions.isEnabled() &&
(!reqSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary requestPayloadSizeMeter = DistributionSummary
.builder(reqSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Request payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, reqSizeOptions))
.register(compositeRegistry);
requestPayloadSizeMeter.record(requestPayloadSizeInBytes);
}
CosmosMeterOptions rspSizeOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_SIZE_RESPONSE);
if (rspSizeOptions.isEnabled() &&
(!rspSizeOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary responsePayloadSizeMeter = DistributionSummary
.builder(rspSizeOptions.getMeterName().toString())
.baseUnit("bytes")
.description("Response payload size in bytes")
.maximumExpectedValue(16d * 1024)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, rspSizeOptions))
.register(compositeRegistry);
responsePayloadSizeMeter.record(responsePayloadSizeInBytes);
}
}
private void recordItemCounts(
CosmosAsyncClient client,
int maxItemCount,
int actualItemCount
) {
if (maxItemCount > 0 && this.metricCategories.contains(MetricCategory.OperationDetails)) {
CosmosMeterOptions maxItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_MAX_ITEM_COUNT);
if (maxItemCountOptions.isEnabled()) {
DistributionSummary maxItemCountMeter = DistributionSummary
.builder(maxItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Request max. item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, maxItemCountOptions))
.register(compositeRegistry);
maxItemCountMeter.record(Math.max(0, Math.min(maxItemCount, 100_000d)));
}
CosmosMeterOptions actualItemCountOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.OPERATION_DETAILS_ACTUAL_ITEM_COUNT);
if (actualItemCountOptions.isEnabled()) {
DistributionSummary actualItemCountMeter = DistributionSummary
.builder(actualItemCountOptions.getMeterName().toString())
.baseUnit("item count")
.description("Response actual item count")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(operationTags, actualItemCountOptions))
.register(compositeRegistry);
actualItemCountMeter.record(Math.max(0, Math.min(actualItemCount, 100_000d)));
}
}
}
private Tags createRequestTags(
EnumSet<TagName> metricTagNames,
String pkRangeId,
int statusCode,
int subStatusCode,
String resourceType,
String operationType,
String regionName,
String serviceEndpoint,
String serviceAddress
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.PartitionKeyRangeId)) {
effectiveTags.add(Tag.of(
TagName.PartitionKeyRangeId.toString(),
Strings.isNullOrWhiteSpace(pkRangeId) ? "NONE" : escape(pkRangeId)));
}
if (metricTagNames.contains(TagName.RequestStatusCode)) {
effectiveTags.add(Tag.of(
TagName.RequestStatusCode.toString(),
statusCode + "/" + subStatusCode));
}
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(Tag.of(
TagName.RequestOperationType.toString(),
resourceType + "/" + operationType));
}
if (metricTagNames.contains(TagName.RegionName)) {
effectiveTags.add(Tag.of(
TagName.RegionName.toString(),
regionName != null ? regionName : "NONE"));
}
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
String effectiveServiceAddress = serviceAddress != null ? escape(serviceAddress) : "NONE";
if (metricTagNames.contains(TagName.ServiceAddress)) {
effectiveTags.add(Tag.of(
TagName.ServiceAddress.toString(),
effectiveServiceAddress));
}
boolean containsPartitionId = metricTagNames.contains(TagName.PartitionId);
boolean containsReplicaId = metricTagNames.contains(TagName.ReplicaId);
if (containsPartitionId || containsReplicaId) {
String partitionId = "NONE";
String replicaId = "NONE";
String[] partitionAndReplicaId =
StoreResultDiagnostics.getPartitionAndReplicaId(effectiveServiceAddress);
if (partitionAndReplicaId.length == 2) {
partitionId = partitionAndReplicaId[0];
replicaId = partitionAndReplicaId[1];
}
if (containsPartitionId) {
effectiveTags.add(Tag.of(
TagName.PartitionId.toString(),
partitionId));
}
if (containsReplicaId) {
effectiveTags.add(Tag.of(
TagName.ReplicaId.toString(),
replicaId));
}
}
return Tags.of(effectiveTags);
}
private Tags createQueryPlanTags(
EnumSet<TagName> metricTagNames
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.RequestOperationType)) {
effectiveTags.add(QUERYPLAN_TAG);
}
return Tags.of(effectiveTags);
}
private Tags createAddressResolutionTags(
EnumSet<TagName> metricTagNames,
String serviceEndpoint,
boolean isForceRefresh,
boolean isForceCollectionRoutingMapRefresh
) {
List<Tag> effectiveTags = new ArrayList<>();
if (metricTagNames.contains(TagName.ServiceEndpoint)) {
effectiveTags.add(Tag.of(
TagName.ServiceEndpoint.toString(),
serviceEndpoint != null ? escape(serviceEndpoint) : "NONE"));
}
if (metricTagNames.contains(TagName.IsForceRefresh)) {
effectiveTags.add(Tag.of(
TagName.IsForceRefresh.toString(),
isForceRefresh ? "True" : "False"));
}
if (metricTagNames.contains(TagName.IsForceCollectionRoutingMapRefresh)) {
effectiveTags.add(Tag.of(
TagName.IsForceCollectionRoutingMapRefresh.toString(),
isForceCollectionRoutingMapRefresh ? "True" : "False"));
}
return Tags.of(effectiveTags);
}
private void recordRntbdEndpointStatistics(
CosmosAsyncClient client,
RntbdEndpointStatistics endpointStatistics,
Tags requestTags) {
if (endpointStatistics == null || !this.metricCategories.contains(MetricCategory.Legacy)) {
return;
}
CosmosMeterOptions acquiredOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_ACQUIRED);
if (acquiredOptions.isEnabled()) {
DistributionSummary acquiredChannelsMeter = DistributionSummary
.builder(acquiredOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(acquired channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, acquiredOptions))
.register(compositeRegistry);
acquiredChannelsMeter.record(endpointStatistics.getAcquiredChannels());
}
CosmosMeterOptions availableOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_AVAILABLE);
if (availableOptions.isEnabled()) {
DistributionSummary availableChannelsMeter = DistributionSummary
.builder(availableOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(available channels)")
.maximumExpectedValue(100_000d)
.publishPercentiles()
.publishPercentileHistogram(false)
.tags(getEffectiveTags(requestTags, availableOptions))
.register(compositeRegistry);
availableChannelsMeter.record(endpointStatistics.getAvailableChannels());
}
CosmosMeterOptions inflightOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.LEGACY_DIRECT_ENDPOINT_STATISTICS_INFLIGHT);
if (inflightOptions.isEnabled()) {
DistributionSummary inflightRequestsMeter = DistributionSummary
.builder(inflightOptions.getMeterName().toString())
.baseUnit("
.description("Endpoint statistics(inflight requests)")
.tags(getEffectiveTags(requestTags, inflightOptions))
.maximumExpectedValue(1_000_000d)
.publishPercentiles(inflightOptions.getPercentiles())
.publishPercentileHistogram(inflightOptions.isHistogramPublishingEnabled())
.register(compositeRegistry);
inflightRequestsMeter.record(endpointStatistics.getInflightRequests());
}
}
private void recordRequestTimeline(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
CosmosMetricName name,
RequestTimeline requestTimeline,
Tags requestTags) {
if (requestTimeline == null || !this.metricCategories.contains(MetricCategory.RequestDetails)) {
return;
}
CosmosMeterOptions timelineOptions = clientAccessor.getMeterOptions(
client,
name);
if (!timelineOptions.isEnabled() ||
(timelineOptions.isDiagnosticThresholdsFilteringEnabled() && !ctx.isThresholdViolated())) {
return;
}
for (RequestTimeline.Event event : requestTimeline) {
Duration duration = event.getDuration();
if (duration == null || duration == Duration.ZERO) {
continue;
}
Timer eventMeter = Timer
.builder(timelineOptions.getMeterName().toString() + "." + escape(event.getName()))
.description("Request timeline (" + event.getName() + ")")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(timelineOptions.getPercentiles())
.publishPercentileHistogram(timelineOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, timelineOptions))
.register(compositeRegistry);
eventMeter.record(duration);
}
}
private void recordStoreResponseStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Collection<ClientSideRequestStatistics.StoreResponseStatistics> storeResponseStatistics) {
if (!this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
for (ClientSideRequestStatistics.StoreResponseStatistics responseStatistics: storeResponseStatistics) {
StoreResultDiagnostics storeResultDiagnostics = responseStatistics.getStoreResult();
StoreResponseDiagnostics storeResponseDiagnostics =
storeResultDiagnostics.getStoreResponseDiagnostics();
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNames,
storeResponseDiagnostics.getPartitionKeyRangeId(),
storeResponseDiagnostics.getStatusCode(),
storeResponseDiagnostics.getSubStatusCode(),
responseStatistics.getRequestResourceType().toString(),
responseStatistics.getRequestOperationType().toString(),
responseStatistics.getRegionName(),
storeResultDiagnostics.getStorePhysicalAddressEscapedAuthority(),
storeResultDiagnostics.getStorePhysicalAddressEscapedPath())
);
Double backendLatency = storeResultDiagnostics.getBackendLatencyInMs();
if (backendLatency != null) {
CosmosMeterOptions beLatencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_BACKEND_LATENCY);
if (beLatencyOptions.isEnabled() &&
(!beLatencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
DistributionSummary backendRequestLatencyMeter = DistributionSummary
.builder(beLatencyOptions.getMeterName().toString())
.baseUnit("ms")
.description("Backend service latency")
.maximumExpectedValue(6_000d)
.publishPercentiles(beLatencyOptions.getPercentiles())
.publishPercentileHistogram(beLatencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, beLatencyOptions))
.register(compositeRegistry);
backendRequestLatencyMeter.record(storeResultDiagnostics.getBackendLatencyInMs());
}
}
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = storeResponseDiagnostics.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("RNTBD Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Duration latency = responseStatistics.getDuration();
if (latency != null) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("RNTBD Request latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_DIRECT_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("RNTBD requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
if (this.metricCategories.contains(MetricCategory.RequestDetails)) {
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_DIRECT_TIMELINE,
storeResponseDiagnostics.getRequestTimeline(), requestTags);
}
recordRequestPayloadSizes(
ctx,
client,
storeResponseDiagnostics.getRequestPayloadLength(),
storeResponseDiagnostics.getResponsePayloadLength()
);
recordRntbdEndpointStatistics(
client,
storeResponseDiagnostics.getRntbdEndpointStatistics(),
requestTags);
}
}
private void recordGatewayStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Duration latency,
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList) {
if (gatewayStatisticsList == null
|| gatewayStatisticsList.size() == 0
|| !this.metricCategories.contains(MetricCategory.RequestSummary)) {
return;
}
EnumSet<TagName> metricTagNamesForGateway = metricTagNames.clone();
metricTagNamesForGateway.remove(TagName.RegionName);
metricTagNamesForGateway.remove(TagName.ServiceAddress);
metricTagNamesForGateway.remove(TagName.ServiceEndpoint);
metricTagNamesForGateway.remove(TagName.PartitionId);
metricTagNamesForGateway.remove(TagName.ReplicaId);
for (ClientSideRequestStatistics.GatewayStatistics gatewayStats : gatewayStatisticsList) {
Tags requestTags = operationTags.and(
createRequestTags(
metricTagNamesForGateway,
gatewayStats.getPartitionKeyRangeId(),
gatewayStats.getStatusCode(),
gatewayStats.getSubStatusCode(),
gatewayStats.getResourceType().toString(),
gatewayStats.getOperationType().toString(),
null,
null,
null)
);
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Gateway requests")
.tags(getEffectiveTags(requestTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
CosmosMeterOptions ruOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_REQUEST_CHARGE);
if (ruOptions.isEnabled() &&
(!ruOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
double requestCharge = gatewayStats.getRequestCharge();
DistributionSummary requestChargeMeter = DistributionSummary
.builder(ruOptions.getMeterName().toString())
.baseUnit("RU (request unit)")
.description("Gateway Request RU charge")
.maximumExpectedValue(100_000d)
.publishPercentiles(ruOptions.getPercentiles())
.publishPercentileHistogram(ruOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, ruOptions))
.register(compositeRegistry);
requestChargeMeter.record(Math.min(requestCharge, 100_000d));
}
if (latency != null) {
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.REQUEST_SUMMARY_GATEWAY_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer requestLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Gateway Request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(requestTags, latencyOptions))
.register(compositeRegistry);
requestLatencyMeter.record(latency);
}
}
recordRequestTimeline(
ctx,
client,
CosmosMetricName.REQUEST_DETAILS_GATEWAY_TIMELINE,
gatewayStats.getRequestTimeline(), requestTags);
}
}
private void recordAddressResolutionStatistics(
CosmosDiagnosticsContext ctx,
CosmosAsyncClient client,
Map<String, ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsMap) {
if (addressResolutionStatisticsMap == null
|| addressResolutionStatisticsMap.size() == 0
|| !this.metricCategories.contains(MetricCategory.AddressResolutions) ) {
return;
}
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics
: addressResolutionStatisticsMap.values()) {
if (addressResolutionStatistics.isInflightRequest() ||
addressResolutionStatistics.getEndTimeUTC() == null) {
continue;
}
Tags addressResolutionTags = operationTags.and(
createAddressResolutionTags(
metricTagNames,
addressResolutionStatistics.getTargetEndpoint(),
addressResolutionStatistics.isForceRefresh(),
addressResolutionStatistics.isForceCollectionRoutingMapRefresh()
)
);
Duration latency = Duration.between(
addressResolutionStatistics.getStartTimeUTC(),
addressResolutionStatistics.getEndTimeUTC());
CosmosMeterOptions latencyOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_LATENCY);
if (latencyOptions.isEnabled() &&
(!latencyOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Timer addressResolutionLatencyMeter = Timer
.builder(latencyOptions.getMeterName().toString())
.description("Address resolution latency")
.maximumExpectedValue(Duration.ofSeconds(6))
.publishPercentiles(latencyOptions.getPercentiles())
.publishPercentileHistogram(latencyOptions.isHistogramPublishingEnabled())
.tags(getEffectiveTags(addressResolutionTags, latencyOptions))
.register(compositeRegistry);
addressResolutionLatencyMeter.record(latency);
}
CosmosMeterOptions reqOptions = clientAccessor.getMeterOptions(
client,
CosmosMetricName.DIRECT_ADDRESS_RESOLUTION_REQUESTS);
if (reqOptions.isEnabled() &&
(!reqOptions.isDiagnosticThresholdsFilteringEnabled() || ctx.isThresholdViolated())) {
Counter requestCounter = Counter
.builder(reqOptions.getMeterName().toString())
.baseUnit("requests")
.description("Address resolution requests")
.tags(getEffectiveTags(addressResolutionTags, reqOptions))
.register(compositeRegistry);
requestCounter.increment();
}
}
}
}
private static class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
private final RntbdTransportClient client;
private final Tags tags;
private final MeterRegistry registry;
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
this.tags = Tags.of(endpoint.clientMetricTag());
this.client = client;
this.registry = registry;
if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
.description("RNTBD concurrent requests (executing or queued request count)")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
.description("RNTBD queued request count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
.description("RNTBD endpoint count")
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
client,
RntbdTransportClient::endpointEvictionCount)
.description("RNTBD endpoint eviction count")
.register(registry);
}
}
if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint.durableEndpointMetrics(),
RntbdDurableEndpointMetrics::totalChannelsAcquiredMetric)
.description("RNTBD acquired channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint.durableEndpointMetrics(),
RntbdDurableEndpointMetrics::totalChannelsClosedMetric)
.description("RNTBD closed channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
if (options.isEnabled()) {
Gauge.builder(
options.getMeterName().toString(),
endpoint.durableEndpointMetrics(),
RntbdDurableEndpointMetrics::channelsAvailableMetric)
.description("RNTBD available channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
}
public void markComplete(RntbdRequestRecord requestRecord) {
if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
Timer requests = null;
Timer requestsSuccess = null;
Timer requestsFailed = null;
CosmosMeterOptions options = this.client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
if (options.isEnabled()) {
requests = Timer
.builder(options.getMeterName().toString())
.description("RNTBD request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(this.tags, options))
.register(this.registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
if (options.isEnabled()) {
requestsFailed = Timer
.builder(options.getMeterName().toString())
.description("RNTBD failed request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
if (options.isEnabled()) {
requestsSuccess = Timer
.builder(options.getMeterName().toString())
.description("RNTBD successful request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(tags, options))
.register(registry);
}
requestRecord.stop(
requests,
requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
if (options.isEnabled()) {
DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD request size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
requestSize.record(requestRecord.requestLength());
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
if (options.isEnabled()) {
DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD response size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
responseSize.record(requestRecord.responseLength());
}
} else {
requestRecord.stop();
}
}
}
static class DescendantValidationResult {
private final Instant expiration;
private final boolean result;
public DescendantValidationResult(Instant expiration, boolean result) {
this.expiration = expiration;
this.result = result;
}
public Instant getExpiration() {
return this.expiration;
}
public boolean getResult() {
return this.result;
}
}
} |
nit: I don't think you need `super.` here? | private void initializeClient(HttpClient httpClient) {
ClientCertificateCredentialBuilder builder = new ClientCertificateCredentialBuilder()
.clientId(isPlaybackMode() ? "Dummy-Id" : getClientId())
.tenantId(isPlaybackMode() ? "Dummy-Id" : getTenantId())
.pipeline(super.getHttpPipeline(httpClient));
credential = isPlaybackMode()
? builder.pemCertificate(getClass().getClassLoader().getResourceAsStream("pemCert.pem")).build()
: builder.pemCertificate(Configuration.getGlobalConfiguration().get("AZURE_CLIENT_CERTIFICATE_PATH")).build();
} | .pipeline(super.getHttpPipeline(httpClient)); | private void initializeClient(HttpClient httpClient) {
ClientCertificateCredentialBuilder builder = new ClientCertificateCredentialBuilder()
.clientId(isPlaybackMode() ? "Dummy-Id" : getClientId())
.tenantId(isPlaybackMode() ? "Dummy-Id" : getTenantId())
.pipeline(super.getHttpPipeline(httpClient));
credential = isPlaybackMode()
? builder.pemCertificate(getClass().getClassLoader().getResourceAsStream("pemCert.pem")).build()
: builder.pemCertificate(Configuration.getGlobalConfiguration().get("AZURE_CLIENT_CERTIFICATE_PATH")).build();
} | class ClientCertificateCredentialTest extends IdentityTestBase {
private static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private ClientCertificateCredential credential;
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getHttpClients")
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "PLAYBACK")
public void getToken(HttpClient httpClient) {
initializeClient(httpClient);
AccessToken actual = credential.getTokenSync(new TokenRequestContext().addScopes("https:
assertNotNull(actual);
assertNotNull(actual.getToken());
assertNotNull(actual.getExpiresAt());
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getHttpClients")
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "PLAYBACK")
public void getTokenAsync(HttpClient httpClient) {
initializeClient(httpClient);
StepVerifier.create(credential.getToken(new TokenRequestContext().addScopes("https:
.expectNextMatches(accessToken -> accessToken.getToken() != null && accessToken.getExpiresAt() != null)
.verifyComplete();
}
} | class ClientCertificateCredentialTest extends IdentityTestBase {
private static final String DISPLAY_NAME_WITH_ARGUMENTS = "{displayName} with [{arguments}]";
private ClientCertificateCredential credential;
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getHttpClients")
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "PLAYBACK")
public void getToken(HttpClient httpClient) {
initializeClient(httpClient);
AccessToken actual = credential.getTokenSync(new TokenRequestContext().addScopes("https:
assertNotNull(actual);
assertNotNull(actual.getToken());
assertNotNull(actual.getExpiresAt());
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getHttpClients")
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "PLAYBACK")
public void getTokenAsync(HttpClient httpClient) {
initializeClient(httpClient);
StepVerifier.create(credential.getToken(new TokenRequestContext().addScopes("https:
.expectNextMatches(accessToken -> accessToken.getToken() != null && accessToken.getExpiresAt() != null)
.verifyComplete();
}
} |
`addMatchers` is varargs so you don't need to make the list. | HttpPipeline getHttpPipeline(HttpClient httpClient) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() || interceptorManager.isPlaybackMode()) {
List<TestProxySanitizer> customSanitizers = new ArrayList<>();
customSanitizers.add(new TestProxySanitizer("$..access_token", null, INVALID_DUMMY_TOKEN,
TestProxySanitizerType.BODY_KEY));
customSanitizers.add(new TestProxySanitizer("client-request-id", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer("x-client-last-telemetry", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer(null, "(client_id=)[^&]+", "$1Dummy-Id",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_secret=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_assertion=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(customSanitizers);
}
if (interceptorManager.isRecordMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
if (interceptorManager.isPlaybackMode()) {
List<TestProxyRequestMatcher> customMatchers = new ArrayList<>();
customMatchers.add(new BodilessMatcher());
customMatchers.add(new CustomMatcher().setExcludedHeaders(Collections.singletonList("X-MRC-CV")));
interceptorManager.addMatchers(customMatchers);
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | interceptorManager.addMatchers(customMatchers); | HttpPipeline getHttpPipeline(HttpClient httpClient) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() || interceptorManager.isPlaybackMode()) {
List<TestProxySanitizer> customSanitizers = new ArrayList<>();
customSanitizers.add(new TestProxySanitizer("$..access_token", null, INVALID_DUMMY_TOKEN,
TestProxySanitizerType.BODY_KEY));
customSanitizers.add(new TestProxySanitizer("client-request-id", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer("x-client-last-telemetry", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer(null, "(client_id=)[^&]+", "$1Dummy-Id",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_secret=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_assertion=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(customSanitizers);
}
if (interceptorManager.isRecordMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
if (interceptorManager.isPlaybackMode()) {
List<TestProxyRequestMatcher> customMatchers = new ArrayList<>();
customMatchers.add(new BodilessMatcher());
customMatchers.add(new CustomMatcher().setExcludedHeaders(Collections.singletonList("X-MRC-CV")));
interceptorManager.addMatchers(customMatchers);
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class IdentityTestBase extends TestProxyTestBase {
public static final String INVALID_DUMMY_TOKEN = "eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJlbWFpbCI6IkJvYkBjb250b"
+ "3NvLmNvbSIsImdpdmVuX25hbWUiOiJCb2IiLCJpc3MiOiJodHRwOi8vRGVmYXVsdC5Jc3N1ZXIuY29tIiwiYXVkIjoiaHR0cDovL0RlZm"
+ "F1bHQuQXVkaWVuY2UuY29tIiwiaWF0IjoiMTYwNzk3ODY4MyIsIm5iZiI6IjE2MDc5Nzg2ODMiLCJleHAiOiIxNjA3OTc4OTgzIn0.";
public static final String INVALID_DUMMY_CLIENT_ASSERTION = "eyJ4NXQiOiJOVmhMQWFmMVVRQTdpZVpKTk5SWmhKWmhKV3c9Iiwi"
+ "YWxnIjoiUlMyNTYifQ.eyJpc3MiOiIwNmM4MmFjZC1hMDIzLTQwMGQtYjZhOC1kNjU3MDQ5NzliNGYiLCJhdWQiOiJodHRwczovL2xvZ2l"
+ "uLm1pY3Jvc29mdG9ubGluZS5jb20vNDZkZDVhNDMtZDE5YS00YWU0LTljNjAtNjUwY2M4OTA5YjExL29hdXRoMi92Mi4wL3Rva2VuIiwic"
+ "3ViIjoiMDZjODJhY2QtYTAyMy00MDBkLWI2YTgtZDY1NzA0OTc5YjRmIiwibmJmIjoxNzA4NjUyNjQyLCJleHAiOjE3MDg2NTMyNDIsImp"
+ "0aSI6ImQ4YTVlZThlLWZmNzMtNDdmZC05NTg0LTFiZmI3NTc3NDc2MiJ9.EKjPiv89K7_awBtOSrguQ9BUIbO_RylvyPuH8a6u-N-6FdX3"
+ "dG3V9fEnR7PEId8yZnQq4QAGyFirmf9vPy8XXdJ1h-Ok8PzFcU-FtN-aFddRhDBZEj37kXtqyNqEq-lw4eQvuURCQrk_e8ZsG6XR2SZsyTM"
+ "uBfr_maQi0Uagg-yax9_ITK1OmJhfv0e93H4zBNsCucT-LFT2IyvXaULBoq04HyhFkhlvlyC8pSiM9jqTYwm64y0ipG-LHbq1jmwHdyTAXx"
+ "OtYfPjXZTAHZD2NakTZQjf3-TC-Ol-7xuD5rtj749RApstk3YglQ5L2rf1e989nu7Jvuh8-XLkz_wrIe06RaBeZsztS-yg3ZlrfOB34glq7"
+ "YpjRAYQXcnnHzfLLibxmMVY0bL1nLRR1PXmgBX2udxpHdm49CwaEXzO4RXPCKMwndFktkxNCv8yXUI1lhZWesVXmVns4RnGF_3HI8J3peS-"
+ "JQ6b3ZYgekD12tJ54GxaebjXXepQz9AHyfRVPJjayT4YBb7V4Gtq1qZhNi44BFx0f-gaZdkBQhx2eaMRxFjJ9lqPTEYWHO0G2gcfH6MIL7r"
+ "EwMfQ30ZjDYuVfiiAe7FE8L1ANxWsXvmNwYrRC7U_QCHXl7nwnflVb_1Isd-T2E-bc6z3jFbLyLlE4SYzP6468GlhlCwajIybEME";
String getClientId() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ID");
}
String getTenantId() {
return Configuration.getGlobalConfiguration().get("AZURE_TENANT_ID");
}
String getClientSecret() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_SECRET");
}
String getClientAssertion() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ASSERTION");
}
boolean isPlaybackMode() {
return interceptorManager.isPlaybackMode();
}
} | class IdentityTestBase extends TestProxyTestBase {
public static final String INVALID_DUMMY_TOKEN = "eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJlbWFpbCI6IkJvYkBjb250b"
+ "3NvLmNvbSIsImdpdmVuX25hbWUiOiJCb2IiLCJpc3MiOiJodHRwOi8vRGVmYXVsdC5Jc3N1ZXIuY29tIiwiYXVkIjoiaHR0cDovL0RlZm"
+ "F1bHQuQXVkaWVuY2UuY29tIiwiaWF0IjoiMTYwNzk3ODY4MyIsIm5iZiI6IjE2MDc5Nzg2ODMiLCJleHAiOiIxNjA3OTc4OTgzIn0.";
public static final String INVALID_DUMMY_CLIENT_ASSERTION = "eyJ4NXQiOiJOVmhMQWFmMVVRQTdpZVpKTk5SWmhKWmhKV3c9Iiwi"
+ "YWxnIjoiUlMyNTYifQ.eyJpc3MiOiIwNmM4MmFjZC1hMDIzLTQwMGQtYjZhOC1kNjU3MDQ5NzliNGYiLCJhdWQiOiJodHRwczovL2xvZ2l"
+ "uLm1pY3Jvc29mdG9ubGluZS5jb20vNDZkZDVhNDMtZDE5YS00YWU0LTljNjAtNjUwY2M4OTA5YjExL29hdXRoMi92Mi4wL3Rva2VuIiwic"
+ "3ViIjoiMDZjODJhY2QtYTAyMy00MDBkLWI2YTgtZDY1NzA0OTc5YjRmIiwibmJmIjoxNzA4NjUyNjQyLCJleHAiOjE3MDg2NTMyNDIsImp"
+ "0aSI6ImQ4YTVlZThlLWZmNzMtNDdmZC05NTg0LTFiZmI3NTc3NDc2MiJ9.EKjPiv89K7_awBtOSrguQ9BUIbO_RylvyPuH8a6u-N-6FdX3"
+ "dG3V9fEnR7PEId8yZnQq4QAGyFirmf9vPy8XXdJ1h-Ok8PzFcU-FtN-aFddRhDBZEj37kXtqyNqEq-lw4eQvuURCQrk_e8ZsG6XR2SZsyTM"
+ "uBfr_maQi0Uagg-yax9_ITK1OmJhfv0e93H4zBNsCucT-LFT2IyvXaULBoq04HyhFkhlvlyC8pSiM9jqTYwm64y0ipG-LHbq1jmwHdyTAXx"
+ "OtYfPjXZTAHZD2NakTZQjf3-TC-Ol-7xuD5rtj749RApstk3YglQ5L2rf1e989nu7Jvuh8-XLkz_wrIe06RaBeZsztS-yg3ZlrfOB34glq7"
+ "YpjRAYQXcnnHzfLLibxmMVY0bL1nLRR1PXmgBX2udxpHdm49CwaEXzO4RXPCKMwndFktkxNCv8yXUI1lhZWesVXmVns4RnGF_3HI8J3peS-"
+ "JQ6b3ZYgekD12tJ54GxaebjXXepQz9AHyfRVPJjayT4YBb7V4Gtq1qZhNi44BFx0f-gaZdkBQhx2eaMRxFjJ9lqPTEYWHO0G2gcfH6MIL7r"
+ "EwMfQ30ZjDYuVfiiAe7FE8L1ANxWsXvmNwYrRC7U_QCHXl7nwnflVb_1Isd-T2E-bc6z3jFbLyLlE4SYzP6468GlhlCwajIybEME";
String getClientId() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ID");
}
String getTenantId() {
return Configuration.getGlobalConfiguration().get("AZURE_TENANT_ID");
}
String getClientSecret() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_SECRET");
}
String getClientAssertion() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ASSERTION");
}
boolean isPlaybackMode() {
return interceptorManager.isPlaybackMode();
}
} |
same - this is also varargs. | HttpPipeline getHttpPipeline(HttpClient httpClient) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() || interceptorManager.isPlaybackMode()) {
List<TestProxySanitizer> customSanitizers = new ArrayList<>();
customSanitizers.add(new TestProxySanitizer("$..access_token", null, INVALID_DUMMY_TOKEN,
TestProxySanitizerType.BODY_KEY));
customSanitizers.add(new TestProxySanitizer("client-request-id", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer("x-client-last-telemetry", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer(null, "(client_id=)[^&]+", "$1Dummy-Id",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_secret=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_assertion=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(customSanitizers);
}
if (interceptorManager.isRecordMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
if (interceptorManager.isPlaybackMode()) {
List<TestProxyRequestMatcher> customMatchers = new ArrayList<>();
customMatchers.add(new BodilessMatcher());
customMatchers.add(new CustomMatcher().setExcludedHeaders(Collections.singletonList("X-MRC-CV")));
interceptorManager.addMatchers(customMatchers);
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | interceptorManager.addSanitizers(customSanitizers); | HttpPipeline getHttpPipeline(HttpClient httpClient) {
final List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() || interceptorManager.isPlaybackMode()) {
List<TestProxySanitizer> customSanitizers = new ArrayList<>();
customSanitizers.add(new TestProxySanitizer("$..access_token", null, INVALID_DUMMY_TOKEN,
TestProxySanitizerType.BODY_KEY));
customSanitizers.add(new TestProxySanitizer("client-request-id", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer("x-client-last-telemetry", null, "REDACTED",
TestProxySanitizerType.HEADER));
customSanitizers.add(new TestProxySanitizer(null, "(client_id=)[^&]+", "$1Dummy-Id",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_secret=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
customSanitizers.add(new TestProxySanitizer(null, "(client_assertion=)[^&]+", "$1Dummy-Secret",
TestProxySanitizerType.BODY_REGEX));
interceptorManager.addSanitizers(customSanitizers);
}
if (interceptorManager.isRecordMode()) {
policies.add(interceptorManager.getRecordPolicy());
}
if (interceptorManager.isPlaybackMode()) {
List<TestProxyRequestMatcher> customMatchers = new ArrayList<>();
customMatchers.add(new BodilessMatcher());
customMatchers.add(new CustomMatcher().setExcludedHeaders(Collections.singletonList("X-MRC-CV")));
interceptorManager.addMatchers(customMatchers);
}
HttpPipeline pipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient)
.build();
return pipeline;
} | class IdentityTestBase extends TestProxyTestBase {
public static final String INVALID_DUMMY_TOKEN = "eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJlbWFpbCI6IkJvYkBjb250b"
+ "3NvLmNvbSIsImdpdmVuX25hbWUiOiJCb2IiLCJpc3MiOiJodHRwOi8vRGVmYXVsdC5Jc3N1ZXIuY29tIiwiYXVkIjoiaHR0cDovL0RlZm"
+ "F1bHQuQXVkaWVuY2UuY29tIiwiaWF0IjoiMTYwNzk3ODY4MyIsIm5iZiI6IjE2MDc5Nzg2ODMiLCJleHAiOiIxNjA3OTc4OTgzIn0.";
public static final String INVALID_DUMMY_CLIENT_ASSERTION = "eyJ4NXQiOiJOVmhMQWFmMVVRQTdpZVpKTk5SWmhKWmhKV3c9Iiwi"
+ "YWxnIjoiUlMyNTYifQ.eyJpc3MiOiIwNmM4MmFjZC1hMDIzLTQwMGQtYjZhOC1kNjU3MDQ5NzliNGYiLCJhdWQiOiJodHRwczovL2xvZ2l"
+ "uLm1pY3Jvc29mdG9ubGluZS5jb20vNDZkZDVhNDMtZDE5YS00YWU0LTljNjAtNjUwY2M4OTA5YjExL29hdXRoMi92Mi4wL3Rva2VuIiwic"
+ "3ViIjoiMDZjODJhY2QtYTAyMy00MDBkLWI2YTgtZDY1NzA0OTc5YjRmIiwibmJmIjoxNzA4NjUyNjQyLCJleHAiOjE3MDg2NTMyNDIsImp"
+ "0aSI6ImQ4YTVlZThlLWZmNzMtNDdmZC05NTg0LTFiZmI3NTc3NDc2MiJ9.EKjPiv89K7_awBtOSrguQ9BUIbO_RylvyPuH8a6u-N-6FdX3"
+ "dG3V9fEnR7PEId8yZnQq4QAGyFirmf9vPy8XXdJ1h-Ok8PzFcU-FtN-aFddRhDBZEj37kXtqyNqEq-lw4eQvuURCQrk_e8ZsG6XR2SZsyTM"
+ "uBfr_maQi0Uagg-yax9_ITK1OmJhfv0e93H4zBNsCucT-LFT2IyvXaULBoq04HyhFkhlvlyC8pSiM9jqTYwm64y0ipG-LHbq1jmwHdyTAXx"
+ "OtYfPjXZTAHZD2NakTZQjf3-TC-Ol-7xuD5rtj749RApstk3YglQ5L2rf1e989nu7Jvuh8-XLkz_wrIe06RaBeZsztS-yg3ZlrfOB34glq7"
+ "YpjRAYQXcnnHzfLLibxmMVY0bL1nLRR1PXmgBX2udxpHdm49CwaEXzO4RXPCKMwndFktkxNCv8yXUI1lhZWesVXmVns4RnGF_3HI8J3peS-"
+ "JQ6b3ZYgekD12tJ54GxaebjXXepQz9AHyfRVPJjayT4YBb7V4Gtq1qZhNi44BFx0f-gaZdkBQhx2eaMRxFjJ9lqPTEYWHO0G2gcfH6MIL7r"
+ "EwMfQ30ZjDYuVfiiAe7FE8L1ANxWsXvmNwYrRC7U_QCHXl7nwnflVb_1Isd-T2E-bc6z3jFbLyLlE4SYzP6468GlhlCwajIybEME";
String getClientId() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ID");
}
String getTenantId() {
return Configuration.getGlobalConfiguration().get("AZURE_TENANT_ID");
}
String getClientSecret() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_SECRET");
}
String getClientAssertion() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ASSERTION");
}
boolean isPlaybackMode() {
return interceptorManager.isPlaybackMode();
}
} | class IdentityTestBase extends TestProxyTestBase {
public static final String INVALID_DUMMY_TOKEN = "eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJlbWFpbCI6IkJvYkBjb250b"
+ "3NvLmNvbSIsImdpdmVuX25hbWUiOiJCb2IiLCJpc3MiOiJodHRwOi8vRGVmYXVsdC5Jc3N1ZXIuY29tIiwiYXVkIjoiaHR0cDovL0RlZm"
+ "F1bHQuQXVkaWVuY2UuY29tIiwiaWF0IjoiMTYwNzk3ODY4MyIsIm5iZiI6IjE2MDc5Nzg2ODMiLCJleHAiOiIxNjA3OTc4OTgzIn0.";
public static final String INVALID_DUMMY_CLIENT_ASSERTION = "eyJ4NXQiOiJOVmhMQWFmMVVRQTdpZVpKTk5SWmhKWmhKV3c9Iiwi"
+ "YWxnIjoiUlMyNTYifQ.eyJpc3MiOiIwNmM4MmFjZC1hMDIzLTQwMGQtYjZhOC1kNjU3MDQ5NzliNGYiLCJhdWQiOiJodHRwczovL2xvZ2l"
+ "uLm1pY3Jvc29mdG9ubGluZS5jb20vNDZkZDVhNDMtZDE5YS00YWU0LTljNjAtNjUwY2M4OTA5YjExL29hdXRoMi92Mi4wL3Rva2VuIiwic"
+ "3ViIjoiMDZjODJhY2QtYTAyMy00MDBkLWI2YTgtZDY1NzA0OTc5YjRmIiwibmJmIjoxNzA4NjUyNjQyLCJleHAiOjE3MDg2NTMyNDIsImp"
+ "0aSI6ImQ4YTVlZThlLWZmNzMtNDdmZC05NTg0LTFiZmI3NTc3NDc2MiJ9.EKjPiv89K7_awBtOSrguQ9BUIbO_RylvyPuH8a6u-N-6FdX3"
+ "dG3V9fEnR7PEId8yZnQq4QAGyFirmf9vPy8XXdJ1h-Ok8PzFcU-FtN-aFddRhDBZEj37kXtqyNqEq-lw4eQvuURCQrk_e8ZsG6XR2SZsyTM"
+ "uBfr_maQi0Uagg-yax9_ITK1OmJhfv0e93H4zBNsCucT-LFT2IyvXaULBoq04HyhFkhlvlyC8pSiM9jqTYwm64y0ipG-LHbq1jmwHdyTAXx"
+ "OtYfPjXZTAHZD2NakTZQjf3-TC-Ol-7xuD5rtj749RApstk3YglQ5L2rf1e989nu7Jvuh8-XLkz_wrIe06RaBeZsztS-yg3ZlrfOB34glq7"
+ "YpjRAYQXcnnHzfLLibxmMVY0bL1nLRR1PXmgBX2udxpHdm49CwaEXzO4RXPCKMwndFktkxNCv8yXUI1lhZWesVXmVns4RnGF_3HI8J3peS-"
+ "JQ6b3ZYgekD12tJ54GxaebjXXepQz9AHyfRVPJjayT4YBb7V4Gtq1qZhNi44BFx0f-gaZdkBQhx2eaMRxFjJ9lqPTEYWHO0G2gcfH6MIL7r"
+ "EwMfQ30ZjDYuVfiiAe7FE8L1ANxWsXvmNwYrRC7U_QCHXl7nwnflVb_1Isd-T2E-bc6z3jFbLyLlE4SYzP6468GlhlCwajIybEME";
String getClientId() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ID");
}
String getTenantId() {
return Configuration.getGlobalConfiguration().get("AZURE_TENANT_ID");
}
String getClientSecret() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_SECRET");
}
String getClientAssertion() {
return Configuration.getGlobalConfiguration().get("AZURE_CLIENT_ASSERTION");
}
boolean isPlaybackMode() {
return interceptorManager.isPlaybackMode();
}
} |
Why do we need a new MsiHandler instance in `update()`? | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode());
return super.update();
} | this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode()); | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this, this.createParameters, this.updateParameters, this.isInCreateMode());
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Just like `StorageNetworkRulesHelper` has also been instantiated twice, the `Storage Account` data needs to be referenced in real time. | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode());
return super.update();
} | this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode()); | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this, this.createParameters, this.updateParameters, this.isInCreateMode());
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
@Override
public boolean isHttpsTrafficOnly() {
    // The service omits the flag when it has never been set explicitly; treat that as "true".
    Boolean httpsOnly = this.innerModel().enableHttpsTrafficOnly();
    return httpsOnly == null || httpsOnly;
}
@Override
@Override
public boolean isBlobPublicAccessAllowed() {
    // An unset value means the service default applies, which is "allowed".
    Boolean allowed = this.innerModel().allowBlobPublicAccess();
    return allowed == null || allowed;
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
// FIX: the annotation was duplicated (`@Override` appeared twice in a row), which is a
// compile error for a non-repeatable annotation; keep a single occurrence.
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // Custom domain goes into the create payload before the account exists,
    // and into the update payload afterwards.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
// FIX: restored the missing `@Override` annotation for consistency with the sibling
// `withHttpAndHttpsTraffic`, which declares it; both implement the same fluent interface.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    // Enforce HTTPS-only traffic on the create or update payload depending on lifecycle phase.
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
@Override
public Mono<StorageAccount> createResourceAsync() {
// Finalize the create payload: network default action, location, tags, and managed identities
// must all be applied before the request is sent.
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
// Create the account, then re-fetch it by resource group/name so the cached inner model
// reflects the service-populated state, and drop stale derived wrappers.
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
@Override
public Mono<StorageAccount> updateResourceAsync() {
// Finalize the update payload (network default action, tags, managed identities)
// before issuing the PATCH-style update.
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
// Apply the update, refresh this wrapper from the returned inner model,
// and invalidate cached derived properties (endpoints, statuses).
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    if (isInCreateMode()) {
        // On create the service default is no AAD DS integration, so only send the
        // authentication block when enabling.
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: this branch previously null-checked and initialized
        // createParameters.azureFilesIdentityBasedAuthentication(), which is never sent on
        // update, so updateParameters.azureFilesIdentityBasedAuthentication() could still be
        // null and the dereference below threw NullPointerException. Initialize the update
        // payload instead.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this.updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
// Read-only adapter exposing the service's private-link-resource model through the
// fluent PrivateLinkResource interface.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
// Wrapped so callers cannot mutate the underlying model's list.
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
// Supplies the MSI principal id and this account's resource id to RoleAssignmentHelper.
// Both accessors return null when the corresponding data is not (yet) available on the
// inner model, e.g. before the account is created.
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
// Immutable adapter mapping the storage-specific private-endpoint-connection inner model
// onto the resources-fluentcore PrivateEndpointConnection interface. All derived fields are
// computed once in the constructor; each is null when its source field is absent on the
// inner model.
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
// Translate the storage-module connection state into the fluentcore equivalent,
// converting the status enum via its string form (the two enums are distinct types).
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
// FIX: the annotation was duplicated (`@Override` appeared twice in a row), which is a
// compile error for a non-repeatable annotation; keep a single occurrence.
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // Custom domain goes into the create payload before the account exists,
    // and into the update payload afterwards.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
// Sets the blob access tier. At creation time any kind may set a tier; in update
// mode the service only supports changing the tier on BlobStorage-kind accounts,
// so other kinds fail fast with an UnsupportedOperationException.
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
// Logged through ClientLogger so the failure shows up in SDK diagnostics.
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
/**
 * Restricts the account to accept traffic over HTTPS only.
 *
 * @return this storage account definition/update
 */
// CONSISTENCY: added the @Override annotation — every sibling stage method
// (e.g. withHttpAndHttpsTraffic below) carries it for the same interface contract.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
/**
 * Creates the storage account, then re-fetches it so the returned model reflects
 * service-populated state (endpoints, provisioning status, identity principal ids).
 *
 * @return a Mono emitting the refreshed storage account
 */
@Override
public Mono<StorageAccount> createResourceAsync() {
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    // MSI identities must be resolved into the payload before the create call.
    this.storageAccountMsiHandler.processCreatedExternalIdentities();
    this.storageAccountMsiHandler.handleExternalIdentities();
    this.storageAccountMsiHandler.clear();
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    // FIX: reuse the already-resolved client instead of redundantly re-resolving
    // manager().serviceClient().getStorageAccounts() for the create call.
    return client
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        .flatMap(
            storageAccountInner ->
                client
                    .getByResourceGroupAsync(resourceGroupName(), this.name())
                    .map(innerToFluentMap(this))
                    .doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
/**
 * Enables or disables Azure Files identity-based authentication through
 * Azure AD Domain Services (AADDS).
 *
 * @param enabled true to enable AADDS directory service options, false to clear them
 * @return this storage account definition/update
 */
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    if (isInCreateMode()) {
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUGFIX: the update branch previously null-checked and initialized
        // createParameters — which is set to null when entering update mode —
        // and then dereferenced updateParameters.azureFilesIdentityBasedAuthentication()
        // without ever initializing it, causing a NullPointerException.
        // Both the check and the mutation must target updateParameters.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this
            .updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
// Enables/disables large file shares. Only honored at creation time; in update
// mode this call is a silent no-op — NOTE(review): confirm whether the service
// forbids changing LargeFileSharesState after creation, or whether this should throw.
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Should you instead make `createParameters`, `updateParameters`, and `isInCreateMode()` package-private in `StorageAccountImpl`, and construct the handler as `StorageAccountMsiHandler(authorizationManager, this)`, so that `StorageAccountMsiHandler` reads those fields and methods at the moment they are needed instead of capturing them at construction time? | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode());
return super.update();
} | this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode()); | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
// Creates the fluent wrapper over a storage account inner model. Invoked by the
// manager both for new (define) and existing (wrap) accounts; helpers are wired
// for create mode here and rewired for update mode in update().
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
// NOTE(review): this.updateParameters is still null here, and isInCreateMode() is
// evaluated eagerly and passed by value — the handler will never observe the
// update-mode state established later in update(). Confirm StorageAccountMsiHandler
// tolerates a null update-parameters object, or refactor it to read these from
// the StorageAccountImpl reference on demand.
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this, this.createParameters, this.updateParameters, this.isInCreateMode());
}
// Lazily materializes the primary/secondary status view from the inner model.
// The cache is invalidated by clearWrapperProperties() after create/update/refresh.
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
/**
 * Gets the resource ids of the user-assigned managed identities associated with
 * this storage account, or an empty set when none are assigned.
 *
 * @return an unmodifiable set of user-assigned identity resource ids
 */
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
    if (innerModel().identity() != null
        && innerModel().identity().userAssignedIdentities() != null) {
        return Collections
            .unmodifiableSet(new HashSet<>(this.innerModel().identity().userAssignedIdentities().keySet()));
    }
    // IDIOM: Collections.emptySet() is already immutable — no need to allocate
    // and wrap a fresh HashSet for the empty case.
    return Collections.emptySet();
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
/**
 * Checks whether the account accepts HTTPS traffic only.
 * Defaults to {@code true} when the flag is absent from the inner model.
 */
@Override
public boolean isHttpsTrafficOnly() {
    Boolean httpsOnly = this.innerModel().enableHttpsTrafficOnly();
    return httpsOnly == null || httpsOnly;
}
/**
 * Checks whether public access to blobs is permitted on this account.
 * Defaults to {@code true} when the flag is absent from the inner model.
 */
@Override
public boolean isBlobPublicAccessAllowed() {
    Boolean allowed = this.innerModel().allowBlobPublicAccess();
    return allowed == null ? true : allowed;
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
/**
 * Specifies the user-assigned custom domain for this storage account.
 * Routed to create or update parameters depending on the current mode.
 *
 * @param customDomain the custom domain (name and optional use-sub-domain flag)
 * @return this storage account definition/update
 */
// BUGFIX: the annotation was duplicated (@Override twice); @Override is not a
// repeatable annotation, so the duplicate is a compile error.
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
/**
 * Restricts the account to accept traffic over HTTPS only.
 *
 * @return this storage account definition/update
 */
// CONSISTENCY: added the @Override annotation — every sibling stage method
// (e.g. withHttpAndHttpsTraffic below) carries it for the same interface contract.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
/**
 * Creates the storage account, then re-fetches it so the returned model reflects
 * service-populated state (endpoints, provisioning status, identity principal ids).
 *
 * @return a Mono emitting the refreshed storage account
 */
@Override
public Mono<StorageAccount> createResourceAsync() {
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    // MSI identities must be resolved into the payload before the create call.
    this.storageAccountMsiHandler.processCreatedExternalIdentities();
    this.storageAccountMsiHandler.handleExternalIdentities();
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    // FIX: reuse the already-resolved client instead of redundantly re-resolving
    // manager().serviceClient().getStorageAccounts() for the create call.
    return client
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        .flatMap(
            storageAccountInner ->
                client
                    .getByResourceGroupAsync(resourceGroupName(), this.name())
                    .map(innerToFluentMap(this))
                    .doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure AD DS based authentication for Azure Files.
    if (isInCreateMode()) {
        // At creation time only the "enabled" case needs an explicit payload;
        // the service default is no identity-based authentication.
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: this branch previously null-checked and initialized
        // createParameters.azureFilesIdentityBasedAuthentication() but then
        // mutated updateParameters.azureFilesIdentityBasedAuthentication(),
        // which could still be null and throw an NPE. Initialize the object
        // on updateParameters, the payload actually sent during update.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this
            .updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
    // Large-file-shares state is only honored at creation time; in update mode
    // this call is a no-op (matching the original behavior).
    if (isInCreateMode()) {
        createParameters.withLargeFileSharesState(
            enabled ? LargeFileSharesState.ENABLED : LargeFileSharesState.DISABLED);
    }
    return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
    // Sets the hierarchical-namespace (Data Lake Gen2) flag on the create payload.
    createParameters.withIsHnsEnabled(enabled);
    return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
    // Registers a yet-to-be-created user-assigned identity with the MSI handler.
    storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
    return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
    // Associates an already-existing user-assigned identity with this account.
    storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
    return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
    // Detaches the user-assigned identity with the given ARM resource id.
    storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
    return this;
}
/** Read-only adapter exposing the inner private-link resource model. */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;

    private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
        this.innerModel = innerModel;
    }

    @Override
    public String groupId() {
        return this.innerModel.groupId();
    }

    @Override
    public List<String> requiredMemberNames() {
        return Collections.unmodifiableList(this.innerModel.requiredMembers());
    }

    @Override
    public List<String> requiredDnsZoneNames() {
        return Collections.unmodifiableList(this.innerModel.requiredZoneNames());
    }
}
RoleAssignmentHelper.IdProvider idProvider() {
    // Supplies the MSI principal id and the ARM resource id of this account to
    // the role-assignment helper; either is null when not (yet) available.
    return new RoleAssignmentHelper.IdProvider() {
        @Override
        public String principalId() {
            return (innerModel() == null || innerModel().identity() == null)
                ? null
                : innerModel().identity().principalId();
        }

        @Override
        public String resourceId() {
            return innerModel() == null ? null : innerModel().id();
        }
    };
}
/**
 * Immutable view over a private-endpoint connection, translating the
 * storage-specific inner model into the resources fluent-core model types.
 * All derived fields are computed once in the constructor; each maps to null
 * when the corresponding inner field is absent.
 */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    private final PrivateEndpointConnectionInner innerModel;
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;

    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;
        // Private endpoint: only the id is carried over.
        this.privateEndpoint = innerModel.privateEndpoint() == null
            ? null
            : new PrivateEndpoint(innerModel.privateEndpoint().id());
        // Connection state: the storage status enum is converted to the
        // fluent-core enum by round-tripping through its string value.
        this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
            ? null
            : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                innerModel.privateLinkServiceConnectionState().status() == null
                    ? null
                    : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                        .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionRequired());
        // Provisioning state: same string round-trip conversion.
        this.provisioningState = innerModel.provisioningState() == null
            ? null
            : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
    }

    @Override
    public String id() {
        return innerModel.id();
    }

    @Override
    public String name() {
        return innerModel.name();
    }

    @Override
    public String type() {
        return innerModel.type();
    }

    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }

    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }

    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // FIX: a second, duplicated @Override annotation was removed — @Override is
    // not a repeatable annotation, so repeating it is a compile-time error.
    // Routes the custom-domain setting to the create or update payload.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    // FIX: added the missing @Override annotation for consistency with the
    // sibling withHttpAndHttpsTraffic(), which carries it.
    // Restricts the account to HTTPS-only traffic.
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure AD DS based authentication for Azure Files.
    if (isInCreateMode()) {
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: the update branch previously dereferenced createParameters,
        // which update() sets to null — a guaranteed NullPointerException —
        // and even when non-null it initialized the wrong payload while
        // mutating updateParameters. Both checks/writes now target
        // updateParameters, the object actually sent during update.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this
            .updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
The following adjustments have been made in the `StorageAccountMsiHandler` class: 1. `isInCreateMode` was fixed to obtain its value via a private method. 2. The constructor was changed to `StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount)`. 3. `updateParameters` and `createParameters` are now set in the `withXXXXXXXX` methods, e.g. `withUpdateParameters`. | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode());
return super.update();
} | this.storageAccountMsiHandler = new StorageAccountMsiHandler(this.authorizationManager, this, createParameters, this.updateParameters, isInCreateMode()); | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
/**
 * Creates the fluent wrapper for a storage account in create mode: fresh
 * create parameters plus helpers that mutate them.
 */
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
    super(name, innerModel, storageManager);
    this.authorizationManager = authorizationManager;
    this.createParameters = new StorageAccountCreateParameters();
    this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
    this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
    // NOTE(review): this.updateParameters is still null at this point, so the
    // MSI handler receives a null update payload — confirm the handler
    // tolerates that until update() replaces it.
    this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this, this.createParameters, this.updateParameters, this.isInCreateMode());
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
// BUGFIX: the annotation was duplicated (@Override appeared twice), which is a
// compile error — a declaration may carry a non-repeatable annotation only once.
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // Route the custom-domain setting to the create or update payload,
    // matching whichever mode the fluent chain is in.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
// Restricts the account to HTTPS traffic only.
// CONSISTENCY: added the @Override annotation that every sibling interface
// method in this class carries; the method implements the fluent interface.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
// Immutable adapter exposing the service's private-link-resource model through
// the fluent PrivateLinkResource interface.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
// The wrapped service model; never mutated after construction.
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
// Unmodifiable view so callers cannot mutate the inner model's list.
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
// Unmodifiable view so callers cannot mutate the inner model's list.
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
// Supplies the principal id and resource id of this storage account to the
// role-assignment helper. Both accessors are null-safe: they return null when
// the inner model (or its identity block) is not yet populated, e.g. before
// the account has been created.
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
// Principal id of the system-assigned identity, if one exists.
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
// ARM resource id of the storage account, if already known.
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
    // ----- Fluent configuration: SKU, account kind, encryption -----

    @Override
    public StorageAccountImpl withSku(StorageAccountSkuType sku) {
        // SKU can be set both at creation time and on update.
        if (isInCreateMode()) {
            createParameters.withSku(new Sku().withName(sku.name()));
        } else {
            updateParameters.withSku(new Sku().withName(sku.name()));
        }
        return this;
    }
    // Account kind is written only to createParameters: kind is fixed at creation
    // (the one supported change is upgradeToGeneralPurposeAccountKindV2 on update).
    @Override
    public StorageAccountImpl withBlobStorageAccountKind() {
        createParameters.withKind(Kind.BLOB_STORAGE);
        return this;
    }
    @Override
    public StorageAccountImpl withGeneralPurposeAccountKind() {
        createParameters.withKind(Kind.STORAGE);
        return this;
    }
    @Override
    public StorageAccountImpl withGeneralPurposeAccountKindV2() {
        createParameters.withKind(Kind.STORAGE_V2);
        return this;
    }
    @Override
    public StorageAccountImpl withBlockBlobStorageAccountKind() {
        createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
        return this;
    }
    @Override
    public StorageAccountImpl withFileStorageAccountKind() {
        createParameters.withKind(Kind.FILE_STORAGE);
        return this;
    }
    // Encryption settings are accumulated in encryptionHelper and applied to the
    // create/update payload elsewhere (not visible in this block).
    @Override
    public StorageAccountImpl withInfrastructureEncryption() {
        this.encryptionHelper.withInfrastructureEncryption();
        return this;
    }
    @Override
    public StorageAccountImpl withBlobEncryption() {
        this.encryptionHelper.withBlobEncryption();
        return this;
    }
    @Override
    public StorageAccountImpl withFileEncryption() {
        this.encryptionHelper.withFileEncryption();
        return this;
    }
    @Override
    public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
        // Customer-managed key: identified by vault URI + key name + key version.
        this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
        return this;
    }
    @Override
    public StorageAccountImpl withoutBlobEncryption() {
        this.encryptionHelper.withoutBlobEncryption();
        return this;
    }
    @Override
    public StorageAccountImpl withoutFileEncryption() {
        this.encryptionHelper.withoutFileEncryption();
        return this;
    }
    @Override
    public StorageAccountImpl withTableAccountScopedEncryptionKey() {
        this.encryptionHelper.withTableEncryption();
        return this;
    }
    @Override
    public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
        this.encryptionHelper.withQueueEncryption();
        return this;
    }
    // Drops cached derived views so they are rebuilt from the refreshed inner model.
    private void clearWrapperProperties() {
        accountStatuses = null;
        publicEndpoints = null;
    }
@Override
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
    @Override
    public StorageAccountImpl withAccessTier(AccessTier accessTier) {
        if (isInCreateMode()) {
            createParameters.withAccessTier(accessTier);
        } else {
            // Access tier is only mutable for BLOB_STORAGE accounts; reject the
            // update early with a clear error instead of a service-side failure.
            if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
                throw logger.logExceptionAsError(new UnsupportedOperationException(
                    "Access tier can not be changed for general purpose storage accounts."));
            }
            updateParameters.withAccessTier(accessTier);
        }
        return this;
    }
    // System-assigned MSI enable/disable is delegated to the MSI handler, which
    // merges it with any user-assigned identity configuration at create/update time.
    @Override
    public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
        this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
        return this;
    }
    @Override
    public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
        this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
        return this;
    }
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
    // ----- Boolean/enum account policy toggles -----
    // Each setter writes to createParameters while the account is being defined and
    // to updateParameters when mutating an existing account.

    @Override
    public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
        if (isInCreateMode()) {
            createParameters.withMinimumTlsVersion(minimumTlsVersion);
        } else {
            updateParameters.withMinimumTlsVersion(minimumTlsVersion);
        }
        return this;
    }
    @Override
    public StorageAccountImpl enableBlobPublicAccess() {
        if (isInCreateMode()) {
            createParameters.withAllowBlobPublicAccess(true);
        } else {
            updateParameters.withAllowBlobPublicAccess(true);
        }
        return this;
    }
    @Override
    public StorageAccountImpl disableBlobPublicAccess() {
        if (isInCreateMode()) {
            createParameters.withAllowBlobPublicAccess(false);
        } else {
            updateParameters.withAllowBlobPublicAccess(false);
        }
        return this;
    }
    @Override
    public StorageAccountImpl enableSharedKeyAccess() {
        if (isInCreateMode()) {
            createParameters.withAllowSharedKeyAccess(true);
        } else {
            updateParameters.withAllowSharedKeyAccess(true);
        }
        return this;
    }
    @Override
    public StorageAccountImpl disableSharedKeyAccess() {
        if (isInCreateMode()) {
            createParameters.withAllowSharedKeyAccess(false);
        } else {
            updateParameters.withAllowSharedKeyAccess(false);
        }
        return this;
    }
    @Override
    public StorageAccountImpl allowCrossTenantReplication() {
        if (isInCreateMode()) {
            createParameters.withAllowCrossTenantReplication(true);
        } else {
            updateParameters.withAllowCrossTenantReplication(true);
        }
        return this;
    }
    @Override
    public StorageAccountImpl disallowCrossTenantReplication() {
        if (isInCreateMode()) {
            createParameters.withAllowCrossTenantReplication(false);
        } else {
            updateParameters.withAllowCrossTenantReplication(false);
        }
        return this;
    }
    @Override
    public StorageAccountImpl enableDefaultToOAuthAuthentication() {
        if (isInCreateMode()) {
            createParameters.withDefaultToOAuthAuthentication(true);
        } else {
            updateParameters.withDefaultToOAuthAuthentication(true);
        }
        return this;
    }
    @Override
    public StorageAccountImpl disableDefaultToOAuthAuthentication() {
        if (isInCreateMode()) {
            createParameters.withDefaultToOAuthAuthentication(false);
        } else {
            updateParameters.withDefaultToOAuthAuthentication(false);
        }
        return this;
    }
    // ----- Network rule configuration -----
    // All network ACL changes are staged in networkRulesHelper; the helper applies
    // them (and picks a default action) when create/update parameters are built.

    @Override
    public StorageAccountImpl withAccessFromAllNetworks() {
        this.networkRulesHelper.withAccessFromAllNetworks();
        return this;
    }
    @Override
    public StorageAccountImpl withAccessFromSelectedNetworks() {
        this.networkRulesHelper.withAccessFromSelectedNetworks();
        return this;
    }
    @Override
    public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
        this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
        return this;
    }
    @Override
    public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
        this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
        return this;
    }
    @Override
    public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
        this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
        return this;
    }
    @Override
    public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
        this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
        return this;
    }
    @Override
    public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
        this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
        return this;
    }
    @Override
    public StorageAccountImpl withAccessFromAzureServices() {
        this.networkRulesHelper.withAccessAllowedFromAzureServices();
        return this;
    }
    @Override
    public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
        this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
        return this;
    }
    @Override
    public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
        this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
        return this;
    }
    @Override
    public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
        this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
        return this;
    }
    // The removal variants below are declared on the Update stage interface only,
    // hence the narrower Update return type.
    @Override
    public Update withoutReadAccessToLoggingFromAnyNetwork() {
        this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
        return this;
    }
    @Override
    public Update withoutReadAccessToMetricsFromAnyNetwork() {
        this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
        return this;
    }
    @Override
    public Update withoutAccessFromAzureServices() {
        this.networkRulesHelper.withoutAccessFromAzureServices();
        return this;
    }
    @Override
    public Update upgradeToGeneralPurposeAccountKindV2() {
        // The only supported post-creation kind change: upgrade to StorageV2.
        updateParameters.withKind(Kind.STORAGE_V2);
        return this;
    }
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
    @Override
    public StorageAccountImpl withHnsEnabled(boolean enabled) {
        // Hierarchical namespace (Data Lake Gen2) can only be chosen at creation.
        this.createParameters.withIsHnsEnabled(enabled);
        return this;
    }
    // User-assigned identity plumbing is delegated to the MSI handler; the creatable
    // variant registers a task-group dependency so the identity is provisioned first.
    @Override
    public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
        this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
        return this;
    }
    @Override
    public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
        this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
        return this;
    }
    @Override
    public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
        this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
        return this;
    }
    /** Read-only adapter exposing an inner PrivateLinkResource through the fluent interface. */
    private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
        private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
        private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
            this.innerModel = innerModel;
        }
        @Override
        public String groupId() {
            return innerModel.groupId();
        }
        @Override
        public List<String> requiredMemberNames() {
            // NOTE(review): assumes the inner lists are non-null here — confirm against the service model.
            return Collections.unmodifiableList(innerModel.requiredMembers());
        }
        @Override
        public List<String> requiredDnsZoneNames() {
            return Collections.unmodifiableList(innerModel.requiredZoneNames());
        }
    }
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
    /**
     * Immutable adapter for an inner PrivateEndpointConnection. All derived fields are
     * computed once in the constructor, with null inner values propagated as null.
     */
    private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
        private final PrivateEndpointConnectionInner innerModel;
        private final PrivateEndpoint privateEndpoint;
        private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState;
        private final PrivateEndpointConnectionProvisioningState provisioningState;
        private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
            this.innerModel = innerModel;
            this.privateEndpoint = innerModel.privateEndpoint() == null
                ? null
                : new PrivateEndpoint(innerModel.privateEndpoint().id());
            // Translate the storage-specific connection state into the fluent-core type,
            // converting the status enum via its string form.
            this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
                ? null
                : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                    innerModel.privateLinkServiceConnectionState().status() == null
                        ? null
                        : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                            .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                    innerModel.privateLinkServiceConnectionState().description(),
                    innerModel.privateLinkServiceConnectionState().actionRequired());
            this.provisioningState = innerModel.provisioningState() == null
                ? null
                : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
        }
        @Override
        public String id() {
            return innerModel.id();
        }
        @Override
        public String name() {
            return innerModel.name();
        }
        @Override
        public String type() {
            return innerModel.type();
        }
        @Override
        public PrivateEndpoint privateEndpoint() {
            return privateEndpoint;
        }
        @Override
        public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState() {
            return privateLinkServiceConnectionState;
        }
        @Override
        public PrivateEndpointConnectionProvisioningState provisioningState() {
            return provisioningState;
        }
    }
} |
This logic is very different from that of VirtualMachineMsiHandler. The potential switch from `SYSTEM_ASSIGNED_USER_ASSIGNED` to `SYSTEM_ASSIGNED` should already be handled in the `handleRemoveAllExternalIdentitiesCase` code (once it has determined that all user-assigned identities are being removed). | StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
if (storageAccount.updateParameters.identity() == null
|| IdentityType.NONE.equals(storageAccount.updateParameters.identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
return this;
} else if (IdentityType.USER_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else if (IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
}
this.userAssignedIdentities.put(identityId, null);
return this;
} | storageAccount.updateParameters.identity().withType(IdentityType.NONE); | StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, true);
this.userAssignedIdentities.put(identityId, null);
return this;
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
    private final StorageAccountImpl storageAccount;
    // Task-group keys of yet-to-be-created identities, resolved after the group runs.
    private List<String> creatableIdentityKeys;
    // Identity resource id -> payload value; a null value marks the identity for removal.
    private Map<String, UserAssignedIdentity> userAssignedIdentities;
    private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
    /**
     * Creates StorageAccountMsiHandler.
     *
     * @param authorizationManager the graph rbac manager
     * @param storageAccount the storage account for which managed identities are configured and for which role
     *     assignments need to be created
     */
    StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
        super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
        this.storageAccount = storageAccount;
        this.creatableIdentityKeys = new ArrayList<>();
        this.userAssignedIdentities = new HashMap<>();
    }
    /**
     * Specifies that the system-assigned (local) managed service identity should be
     * enabled on the storage account.
     *
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withLocalManagedServiceIdentity() {
        this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED);
        return this;
    }
    /**
     * Specifies that the system-assigned (local) managed service identity should be
     * disabled on the storage account.
     *
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
        if (storageAccount.updateParameters.identity() == null
            || IdentityType.NONE.equals(storageAccount.updateParameters.identity().type())
            || IdentityType.USER_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
            // Nothing to remove: either no identity is configured or only user-assigned ones.
            return this;
        } else if (IdentityType.SYSTEM_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
            storageAccount.updateParameters.identity().withType(IdentityType.NONE);
        } else {
            // SYSTEM_ASSIGNED_USER_ASSIGNED: keep only the user-assigned portion.
            storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
        }
        return this;
    }
    /**
     * Specifies that given identity should be set as one of the External Managed Service Identity of the storage
     * account.
     *
     * @param creatableIdentity yet-to-be-created identity to be associated with the storage account
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
        this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED);
        // Register the identity's creation as a dependency so it exists before this account is created/updated.
        TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
        Objects.requireNonNull(dependency);
        this.storageAccount.taskGroup().addDependency(dependency);
        this.creatableIdentityKeys.add(creatableIdentity.key());
        return this;
    }
    /**
     * Specifies that given identity should be set as one of the External Managed Service Identity of the storage
     * account.
     *
     * @param identity an identity to associate
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
        this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED);
        this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
        return this;
    }
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
    /**
     * Merges the staged user-assigned identities into the create or update payload.
     * In update mode, first checks whether the request amounts to "remove every
     * associated identity", which needs special-casing of the identity type.
     */
    void handleExternalIdentities() {
        if (storageAccount.isInCreateMode()) {
            if (!this.userAssignedIdentities.isEmpty()) {
                storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
            }
        } else {
            if (!this.handleRemoveAllExternalIdentitiesCase()) {
                if (!this.userAssignedIdentities.isEmpty()) {
                    storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
                } else {
                    // No user-assigned identity changes: make sure none are sent.
                    if (storageAccount.updateParameters.identity() != null) {
                        storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
                    }
                }
            }
        }
    }
    /** Clear StorageAccountMsiHandler post-run specific internal state. */
    void clear() {
        this.userAssignedIdentities = new HashMap<>();
    }
    /**
     * Method that handle the case where user request indicates all it want to do is remove all identities associated
     * with the storage account.
     *
     * @return true if user indented to remove all the identities.
     */
    private boolean handleRemoveAllExternalIdentitiesCase() {
        if (!this.userAssignedIdentities.isEmpty()) {
            // Count leading removal markers (null values); any non-null entry means
            // the request also adds/keeps identities, so this is not a pure removal.
            int rmCount = 0;
            for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
                if (v == null) {
                    rmCount++;
                } else {
                    break;
                }
            }
            boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
            if (containsRemoveOnly) {
                // Collect currently configured identity ids (case-insensitive compare via lower-casing).
                Set<String> currentIds = new HashSet<>();
                Identity currentIdentity = storageAccount.updateParameters.identity();
                if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
                    for (String id : currentIdentity.userAssignedIdentities().keySet()) {
                        currentIds.add(id.toLowerCase(Locale.ROOT));
                    }
                }
                Set<String> removeIds = new HashSet<>();
                for (Map.Entry<String, UserAssignedIdentity> entrySet
                    : this.userAssignedIdentities.entrySet()) {
                    if (entrySet.getValue() == null) {
                        removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
                    }
                }
                boolean removeAllCurrentIds =
                    currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
                if (removeAllCurrentIds) {
                    // Downgrade the identity type accordingly: drop the user-assigned part,
                    // keeping the system-assigned part if it was present.
                    if (currentIdentity == null || currentIdentity.type() == null) {
                        storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
                    } else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
                        storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
                    } else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
                        storageAccount.updateParameters.identity().withType(IdentityType.NONE);
                    }
                    storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
                    return true;
                } else {
                    // Removals requested but nothing is configured yet: send an explicit NONE identity.
                    if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
                        storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
                        return true;
                    }
                }
            }
        }
        return false;
    }
    /**
     * Initialize storage account's identity property, combining the requested type
     * with any type already staged (e.g. SYSTEM_ASSIGNED + USER_ASSIGNED merge into
     * SYSTEM_ASSIGNED_USER_ASSIGNED).
     *
     * @param identityType the identity type to set (SYSTEM_ASSIGNED or USER_ASSIGNED only)
     */
    private void initStorageAccountIdentity(IdentityType identityType) {
        if (!identityType.equals(IdentityType.USER_ASSIGNED)
            && !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
        }
        if (storageAccount.isInCreateMode()) {
            if (storageAccount.createParameters.identity() == null
                || storageAccount.createParameters.identity().type() == null
                || storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
                || storageAccount.createParameters.identity().type().equals(identityType)) {
                storageAccount.createParameters.withIdentity(new Identity().withType(identityType));
            } else {
                // A different identity type is already staged; keep both kinds.
                storageAccount.createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED));
            }
        } else {
            if (storageAccount.updateParameters.identity() == null
                || storageAccount.updateParameters.identity().type() == null
                || storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
                || storageAccount.updateParameters.identity().type().equals(identityType)) {
                Identity identity = Objects.isNull(storageAccount.updateParameters.identity()) ? new Identity().withType(identityType) : storageAccount.updateParameters.identity().withType(identityType);
                storageAccount.updateParameters.withIdentity(identity);
            } else {
                storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
            }
        }
    }
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
    private final StorageAccountImpl storageAccount;
    // Task-group keys of yet-to-be-created identities, resolved after the group runs.
    private List<String> creatableIdentityKeys;
    // Identity resource id -> payload value; a null value marks the identity for removal.
    private Map<String, UserAssignedIdentity> userAssignedIdentities;
    private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
    /**
     * Creates StorageAccountMsiHandler.
     *
     * @param authorizationManager the graph rbac manager
     * @param storageAccount the storage account for which managed identities are configured and for which role
     *     assignments need to be created
     */
    StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
        super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
        this.storageAccount = storageAccount;
        this.creatableIdentityKeys = new ArrayList<>();
        this.userAssignedIdentities = new HashMap<>();
    }
    /**
     * Specifies that the system-assigned (local) managed service identity should be
     * enabled on the storage account.
     *
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withLocalManagedServiceIdentity() {
        this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, false);
        return this;
    }
    /**
     * Specifies that the system-assigned (local) managed service identity should be
     * disabled on the storage account.
     *
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
        // Decide from the *current* inner model: nothing to remove when no identity
        // is configured or only user-assigned identities exist.
        if (this.storageAccount.innerModel().identity() == null
            || this.storageAccount.innerModel().identity().type() == null
            || IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
            || IdentityType.USER_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
            return this;
        }
        this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, true);
        return this;
    }
    /**
     * Specifies that given identity should be set as one of the External Managed Service Identity of the storage
     * account.
     *
     * @param creatableIdentity yet-to-be-created identity to be associated with the storage account
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
        this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
        // Register the identity's creation as a dependency so it exists before this account is created/updated.
        TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
        Objects.requireNonNull(dependency);
        this.storageAccount.taskGroup().addDependency(dependency);
        this.creatableIdentityKeys.add(creatableIdentity.key());
        return this;
    }
    /**
     * Specifies that given identity should be set as one of the External Managed Service Identity of the storage
     * account.
     *
     * @param identity an identity to associate
     * @return StorageAccountMsiHandler
     */
    StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
        this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
        this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
        return this;
    }
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
    /**
     * Merges the staged user-assigned identities into the create or update payload.
     * In update mode, first checks whether the request amounts to "remove every
     * associated identity", which needs special-casing of the identity type.
     */
    void handleExternalIdentities() {
        if (storageAccount.isInCreateMode()) {
            if (!this.userAssignedIdentities.isEmpty()) {
                this.storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
            }
        } else {
            if (!this.handleRemoveAllExternalIdentitiesCase()) {
                if (!this.userAssignedIdentities.isEmpty()) {
                    this.storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
                } else {
                    // No user-assigned identity changes: make sure none are sent.
                    if (this.storageAccount.updateParameters.identity() != null) {
                        this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
                    }
                }
            }
        }
    }
    /** Clear StorageAccountMsiHandler post-run specific internal state. */
    void clear() {
        this.userAssignedIdentities = new HashMap<>();
    }
    /**
     * Handles the case where the user's request indicates that all identities
     * associated with the storage account should be removed.
     * (Fixed a fullwidth CJK period and broken grammar in the previous Javadoc.)
     *
     * @return true if the user intends to remove all the identities
     */
    private boolean handleRemoveAllExternalIdentitiesCase() {
        if (!this.userAssignedIdentities.isEmpty()) {
            // Count leading removal markers (null values); any non-null entry means
            // the request also adds/keeps identities, so this is not a pure removal.
            int rmCount = 0;
            for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
                if (v == null) {
                    rmCount++;
                } else {
                    break;
                }
            }
            boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
            if (containsRemoveOnly) {
                // Collect currently configured identity ids (case-insensitive compare via lower-casing).
                Set<String> currentIds = new HashSet<>();
                Identity currentIdentity = this.storageAccount.updateParameters.identity();
                if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
                    for (String id : currentIdentity.userAssignedIdentities().keySet()) {
                        currentIds.add(id.toLowerCase(Locale.ROOT));
                    }
                }
                Set<String> removeIds = new HashSet<>();
                for (Map.Entry<String, UserAssignedIdentity> entrySet
                    : this.userAssignedIdentities.entrySet()) {
                    if (entrySet.getValue() == null) {
                        removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
                    }
                }
                boolean removeAllCurrentIds =
                    currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
                if (removeAllCurrentIds) {
                    // Downgrade the identity type accordingly: drop the user-assigned part,
                    // keeping the system-assigned part if it was present.
                    if (currentIdentity == null || currentIdentity.type() == null) {
                        this.storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
                    } else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
                        this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
                    } else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
                        this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
                    }
                    this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
                    return true;
                } else {
                    // Removals requested but nothing is configured yet: send an explicit NONE identity.
                    if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
                        this.storageAccount.updateParameters.withIdentity(
                            new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
                        return true;
                    }
                }
            }
        }
        return false;
    }
/**
* Initialize storage account's identity property.
*
* @param identityType the identity type to set
*/
private void initStorageAccountIdentity(IdentityType identityType, Boolean isWithout) {
if (!identityType.equals(IdentityType.USER_ASSIGNED)
&& !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
}
if (this.storageAccount.isInCreateMode()) {
if (Objects.isNull(this.storageAccount.createParameters.identity())) {
this.storageAccount.createParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.createParameters.identity().type())
|| this.storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.createParameters.identity().type().equals(identityType)) {
this.storageAccount.createParameters.identity().withType(identityType);
} else {
this.storageAccount.createParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (isWithout) {
if (IdentityType.SYSTEM_ASSIGNED.equals(identityType)) {
if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.updateParameters.identity().type())) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
}
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity().type())
|| this.storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.updateParameters.identity().type().equals(identityType)) {
this.storageAccount.updateParameters.identity().withType(identityType);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
}
}
}
} |
Logic here changed. So far I haven't seen a problem with the process. If the user didn't modify the identity, I assume the request to the server would not include "userAssignedIdentities" in the JSON? via `storageAccount.updateParameters.identity().withUserAssignedIdentities(null)` | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters().withIdentity(this.innerModel().identity());
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | updateParameters = new StorageAccountUpdateParameters().withIdentity(this.innerModel().identity()); | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
    @Override
    public Mono<StorageAccount> updateResourceAsync() {
        // Finalize the network ACL default action before sending the update.
        this.networkRulesHelper.setDefaultActionIfRequired();
        // Tags are tracked on the inner model; copy them into the update payload.
        updateParameters.withTags(this.innerModel().tags());
        // Resolve newly created user-assigned identities and fold identity changes
        // into updateParameters before the request is issued.
        this.storageAccountMsiHandler.processCreatedExternalIdentities();
        this.storageAccountMsiHandler.handleExternalIdentities();
        return this
            .manager()
            .serviceClient()
            .getStorageAccounts()
            .updateAsync(resourceGroupName(), this.name(), updateParameters)
            .map(innerToFluentMap(this))
            // Cached wrappers (endpoints/statuses) are stale after a server round-trip.
            .doOnNext(storageAccount -> clearWrapperProperties());
    }
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
    /** Read-only {@link PrivateLinkResource} adapter over the generated storage model type. */
    private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
        private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
        private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
            this.innerModel = innerModel;
        }
        @Override
        public String groupId() {
            return innerModel.groupId();
        }
        @Override
        public List<String> requiredMemberNames() {
            // Unmodifiable view so callers cannot mutate the inner model's list.
            return Collections.unmodifiableList(innerModel.requiredMembers());
        }
        @Override
        public List<String> requiredDnsZoneNames() {
            return Collections.unmodifiableList(innerModel.requiredZoneNames());
        }
    }
    /**
     * Exposes this storage account's principal and resource IDs for role assignment.
     *
     * @return an id provider backed by the live inner model (values reflect the latest refresh)
     */
    RoleAssignmentHelper.IdProvider idProvider() {
        return new RoleAssignmentHelper.IdProvider() {
            @Override
            public String principalId() {
                // A principal ID exists only once a managed identity has been assigned.
                if (innerModel() != null && innerModel().identity() != null) {
                    return innerModel().identity().principalId();
                } else {
                    return null;
                }
            }
            @Override
            public String resourceId() {
                // Null until the account has been created (no ARM id yet).
                if (innerModel() != null) {
                    return innerModel().id();
                } else {
                    return null;
                }
            }
        };
    }
    /**
     * Read-only {@link PrivateEndpointConnection} adapter over the generated inner model.
     * All derived fields are computed eagerly in the constructor with null-tolerant mapping,
     * since every nested piece of the inner model may be absent.
     */
    private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
        private final PrivateEndpointConnectionInner innerModel;
        private final PrivateEndpoint privateEndpoint;
        private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState;
        private final PrivateEndpointConnectionProvisioningState provisioningState;
        private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
            this.innerModel = innerModel;
            this.privateEndpoint = innerModel.privateEndpoint() == null
                ? null
                : new PrivateEndpoint(innerModel.privateEndpoint().id());
            // Translate the storage-specific connection state into the fluent-core type;
            // the status enum is bridged via its string form.
            this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
                ? null
                : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                    innerModel.privateLinkServiceConnectionState().status() == null
                        ? null
                        : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                            .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                    innerModel.privateLinkServiceConnectionState().description(),
                    innerModel.privateLinkServiceConnectionState().actionRequired());
            this.provisioningState = innerModel.provisioningState() == null
                ? null
                : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
        }
        @Override
        public String id() {
            return innerModel.id();
        }
        @Override
        public String name() {
            return innerModel.name();
        }
        @Override
        public String type() {
            return innerModel.type();
        }
        @Override
        public PrivateEndpoint privateEndpoint() {
            return privateEndpoint;
        }
        @Override
        public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
            privateLinkServiceConnectionState() {
            return privateLinkServiceConnectionState;
        }
        @Override
        public PrivateEndpointConnectionProvisioningState provisioningState() {
            return provisioningState;
        }
    }
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
    // An unset flag is reported as HTTPS-only enabled (the conservative default).
    Boolean httpsOnly = this.innerModel().enableHttpsTrafficOnly();
    return httpsOnly == null || httpsOnly;
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
    // Re-fetches the inner model via the base refresh, then drops the cached
    // accountStatuses/publicEndpoints wrappers so they are rebuilt lazily from
    // the refreshed inner model.
    return super
        .refreshAsync()
        .map(
            storageAccount -> {
                StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
                impl.clearWrapperProperties();
                return impl;
            });
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // Routes the custom domain to the create or update payload depending on the
    // fluent-model mode. Fix: the annotation was duplicated (@Override twice),
    // which is a compile error — @Override is not a repeatable annotation.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    // Requires HTTPS for all traffic to this account, on the payload matching
    // the current mode. Fix: added the missing @Override — this implements the
    // fluent Definition/Update contract exactly like withHttpAndHttpsTraffic.
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
    // Finalize the create payload: default network action, location, tags, and
    // any pending managed-identity wiring.
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    this.storageAccountMsiHandler.processCreatedExternalIdentities();
    this.storageAccountMsiHandler.handleExternalIdentities();
    this.storageAccountMsiHandler.clear();
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    // Create, then re-GET the account so the fluent model reflects the fully
    // provisioned resource, and drop stale cached wrappers.
    return client
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        .flatMap(created -> client
            .getByResourceGroupAsync(resourceGroupName(), this.name())
            .map(innerToFluentMap(this))
            .doOnNext(account -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
    // Finalize the update payload: default network action, current tags, and any
    // pending managed-identity wiring, then PATCH the account.
    this.networkRulesHelper.setDefaultActionIfRequired();
    updateParameters.withTags(this.innerModel().tags());
    this.storageAccountMsiHandler.processCreatedExternalIdentities();
    this.storageAccountMsiHandler.handleExternalIdentities();
    this.storageAccountMsiHandler.clear();
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .updateAsync(resourceGroupName(), this.name(), updateParameters)
        .map(innerToFluentMap(this))
        // Invalidate cached accountStatuses/publicEndpoints after the update.
        .doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    if (isInCreateMode()) {
        // On create, only an explicit enable needs a payload entry; the service
        // default is "no directory service".
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // Bug fix: the update branch previously null-checked and initialized
        // createParameters — which update() sets to null, so this threw an NPE —
        // and left updateParameters.azureFilesIdentityBasedAuthentication() null
        // for the dereference below. It must operate on updateParameters.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        if (enabled) {
            this
                .updateParameters
                .azureFilesIdentityBasedAuthentication()
                .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
        } else {
            this
                .updateParameters
                .azureFilesIdentityBasedAuthentication()
                .withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
        }
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
// Thin read-only adapter from the service model PrivateLinkResource to the
// fluent-core PrivateLinkResource interface.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
    private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
        this.innerModel = innerModel;
    }
    @Override
    public String groupId() {
        return innerModel.groupId();
    }
    @Override
    public List<String> requiredMemberNames() {
        // NOTE(review): unmodifiableList throws NPE if requiredMembers() is null —
        // presumably the service always populates it; confirm.
        return Collections.unmodifiableList(innerModel.requiredMembers());
    }
    @Override
    public List<String> requiredDnsZoneNames() {
        // Same null caveat as requiredMemberNames().
        return Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
// Supplies the principal id and resource id that RoleAssignmentHelper needs
// when wiring role assignments for this account's managed identity.
RoleAssignmentHelper.IdProvider idProvider() {
    return new RoleAssignmentHelper.IdProvider() {
        @Override
        public String principalId() {
            // Null until a system-assigned identity exists on the account.
            if (innerModel() == null || innerModel().identity() == null) {
                return null;
            }
            return innerModel().identity().principalId();
        }
        @Override
        public String resourceId() {
            return innerModel() == null ? null : innerModel().id();
        }
    };
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Through temporary test case testing, it was found that the change `updateParameters = new StorageAccountUpdateParameters().withIdentity(this.innerModel().identity());` is indeed inappropriate and has been fixed in the new version. | public StorageAccountImpl update() {
createParameters = null;
// Fix: start from a clean update payload. Seeding it with
// .withIdentity(this.innerModel().identity()) re-sent the current identity on
// every PATCH, which is inappropriate — identity changes are applied by
// StorageAccountMsiHandler during the update flow.
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | updateParameters = new StorageAccountUpdateParameters().withIdentity(this.innerModel().identity()); | public StorageAccountImpl update() {
createParameters = null;
// Fresh, empty update payload; the helpers are rebound to it so subsequent
// with*() calls in update mode mutate this payload rather than the create one.
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // Routes the custom domain to the create or update payload depending on the
    // fluent-model mode. Fix: the annotation was duplicated (@Override twice),
    // which is a compile error — @Override is not a repeatable annotation.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
    // Access tier can always be set at creation time; after creation the
    // service only permits changing it on Blob Storage accounts.
    if (isInCreateMode()) {
        createParameters.withAccessTier(accessTier);
        return this;
    }
    if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(
            "Access tier can not be changed for general purpose storage accounts."));
    }
    updateParameters.withAccessTier(accessTier);
    return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
// Consistency fix: every sibling fluent method (e.g. withHttpAndHttpsTraffic)
// carries @Override; this one was missing it.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    // Forces HTTPS-only traffic on the account, on the create or update
    // payload depending on lifecycle stage.
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
    // Finalizes the create payload (network rules default action, location,
    // tags, managed identities) and issues the create call, then re-reads the
    // account so the returned model reflects service-populated fields.
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    this.storageAccountMsiHandler.processCreatedExternalIdentities();
    this.storageAccountMsiHandler.handleExternalIdentities();
    // NOTE(review): another copy of this flow in this file also calls
    // storageAccountMsiHandler.clear() at this point to reset pending identity
    // state -- confirm whether that reset is needed here as well.
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        .flatMap(
            storageAccountInner ->
                client
                    .getByResourceGroupAsync(resourceGroupName(), this.name())
                    .map(innerToFluentMap(this))
                    // Invalidate cached wrapper views built from the old inner model.
                    .doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure Files AAD Domain Services (AADDS) based
    // authentication on the account.
    if (isInCreateMode()) {
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
        // When disabled at creation time nothing is sent and the service
        // default applies.
    } else {
        // BUGFIX: the null-check and initialization previously targeted
        // createParameters, so updateParameters.azureFilesIdentityBasedAuthentication()
        // stayed null and the dereference below threw a NullPointerException.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this.updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(
                enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
    // Only applied at creation time; in update mode this call is a silent no-op.
    // NOTE(review): confirm whether large-file-shares should also be settable
    // on update, or whether this builder should reject the call explicitly.
    if (isInCreateMode()) {
        if (enabled) {
            this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
        } else {
            this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
        }
    }
    return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
// BUGFIX: the annotation was duplicated (`@Override` twice in a row), which
// does not compile -- @Override is not a repeatable annotation.
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    // Route the custom domain to the create payload for a new account, or to
    // the update payload for an existing one.
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
// Consistency fix: every sibling fluent method (e.g. withHttpAndHttpsTraffic)
// carries @Override; this one was missing it.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    // Forces HTTPS-only traffic on the account, on the create or update
    // payload depending on lifecycle stage.
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure Files AAD Domain Services (AADDS) based
    // authentication on the account.
    if (isInCreateMode()) {
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
        // When disabled at creation time nothing is sent and the service
        // default applies.
    } else {
        // BUGFIX: the null-check and initialization previously targeted
        // createParameters, so updateParameters.azureFilesIdentityBasedAuthentication()
        // stayed null and the dereference below threw a NullPointerException.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this.updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(
                enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
    // Large-file-shares support can only be chosen while the account is being
    // created; in update mode the request is silently ignored, as before.
    if (isInCreateMode()) {
        LargeFileSharesState state =
            enabled ? LargeFileSharesState.ENABLED : LargeFileSharesState.DISABLED;
        this.createParameters.withLargeFileSharesState(state);
    }
    return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
// Hierarchical namespace (Data Lake Gen2) is a create-time-only property; this
// always writes to the create payload. NOTE(review): calling this in update mode
// would NPE because createParameters is null there - confirm that is intended.
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
// The three methods below delegate managed-identity configuration to the MSI
// handler, which batches the changes and applies them during create/update.
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
// Attach an identity that will be created as part of the same task group.
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
// Attach an already-existing user-assigned identity.
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
// Detach the user-assigned identity with the given ARM resource id.
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
// Read-only adapter exposing a storage-models PrivateLinkResource through the
// fluent-core PrivateLinkResource interface.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
// Unmodifiable view; NOTE(review): assumes requiredMembers() is non-null - confirm.
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
// Unmodifiable view; NOTE(review): assumes requiredZoneNames() is non-null - confirm.
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
    // Supplies the MSI principal id and the ARM resource id of this account to
    // RoleAssignmentHelper; either may be null until the resource exists.
    return new RoleAssignmentHelper.IdProvider() {
        @Override
        public String principalId() {
            boolean hasIdentity = innerModel() != null && innerModel().identity() != null;
            return hasIdentity ? innerModel().identity().principalId() : null;
        }

        @Override
        public String resourceId() {
            return innerModel() == null ? null : innerModel().id();
        }
    };
}
// Immutable read-only view over a private endpoint connection inner model.
// All derived fields are computed eagerly in the constructor.
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
// Each mapped field tolerates a missing counterpart on the inner model by
// falling back to null rather than throwing.
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
// Status enums are bridged between the storage models and the fluent-core
// models via their string values.
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Fixed in the new version. | StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
// Nothing to do when no identity is configured on the update payload, or when
// only the system-assigned identity (or none) is present.
if (storageAccount.updateParameters.identity() == null
|| IdentityType.NONE.equals(storageAccount.updateParameters.identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
return this;
} else if (IdentityType.USER_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
// NOTE(review): this drops the type to NONE even when other user-assigned
// identities remain attached - looks suspect; compare with the reworked
// version of this method which routes through initStorageAccountIdentity.
storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else if (IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
}
// A null map value marks this identity id for removal; handleExternalIdentities
// interprets null entries as deletions.
this.userAssignedIdentities.put(identityId, null);
return this;
} | storageAccount.updateParameters.identity().withType(IdentityType.NONE); | StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
// No-op when the current (inner) state has no identity, or only a
// system-assigned identity - there is no user-assigned identity to remove.
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
// Seed/adjust the identity type on the update payload for a removal operation.
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, true);
// A null map value marks this identity id for removal in handleExternalIdentities.
this.userAssignedIdentities.put(identityId, null);
return this;
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
private final StorageAccountImpl storageAccount;
private List<String> creatableIdentityKeys;
private Map<String, UserAssignedIdentity> userAssignedIdentities;
private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
/**
* Creates StorageAccountMsiHandler.
*
* @param authorizationManager the graph rbac manager
* @param storageAccount the storage account to which MSI extension needs to be installed and for which role
* assignments needs to be created
*/
StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
this.storageAccount = storageAccount;
this.creatableIdentityKeys = new ArrayList<>();
this.userAssignedIdentities = new HashMap<>();
}
/**
* Specifies that Local Managed Service Identity needs to be enabled in the storage account. If MSI extension is not
* already installed then it will be installed with access token port as 50342.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withLocalManagedServiceIdentity() {
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED);
return this;
}
/**
* Specifies that Local Managed Service Identity needs to be disabled in the storage account.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
// Nothing to disable when no identity is set, or when only user-assigned
// identities (or none at all) are configured on the update payload.
if (storageAccount.updateParameters.identity() == null
|| IdentityType.NONE.equals(storageAccount.updateParameters.identity().type())
|| IdentityType.USER_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
return this;
} else if (IdentityType.SYSTEM_ASSIGNED.equals(storageAccount.updateParameters.identity().type())) {
// Only the system identity was enabled: removing it leaves no identity.
storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else {
// SYSTEM_ASSIGNED_USER_ASSIGNED: keep just the user-assigned part.
storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
}
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param creatableIdentity yet-to-be-created identity to be associated with the storage account
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED);
TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
Objects.requireNonNull(dependency);
this.storageAccount.taskGroup().addDependency(dependency);
this.creatableIdentityKeys.add(creatableIdentity.key());
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param identity an identity to associate
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
return this;
}
/**
* Pulls the identities that were created as part of this task group and records them
* as user-assigned identities to be attached to the storage account.
*
* <p>NOTE(review): the previous Javadoc here documented identity removal and an
* {@code identityId} parameter, which did not match this method's signature.</p>
*/
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
// The task group has resolved the Creatable into a concrete MSI identity.
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
void handleExternalIdentities() {
if (storageAccount.isInCreateMode()) {
if (!this.userAssignedIdentities.isEmpty()) {
storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
}
} else {
if (!this.handleRemoveAllExternalIdentitiesCase()) {
if (!this.userAssignedIdentities.isEmpty()) {
storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
} else {
if (storageAccount.updateParameters.identity() != null) {
storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
}
}
}
}
}
/** Clear StorageAccountMsiHandler post-run specific internal state. */
void clear() {
this.userAssignedIdentities = new HashMap<>();
}
/**
* Handles the case where the user request indicates that all they want to do is
* remove every identity currently associated with the storage account.
*
* @return true if the user intended to remove all the identities
*/
private boolean handleRemoveAllExternalIdentitiesCase() {
if (!this.userAssignedIdentities.isEmpty()) {
// Count leading null entries; a null map value marks an identity for removal.
int rmCount = 0;
for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
if (v == null) {
rmCount++;
} else {
break;
}
}
boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
if (containsRemoveOnly) {
// Collect the ids currently on the update payload, lower-cased for
// case-insensitive comparison of ARM resource ids.
Set<String> currentIds = new HashSet<>();
Identity currentIdentity = storageAccount.updateParameters.identity();
if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
for (String id : currentIdentity.userAssignedIdentities().keySet()) {
currentIds.add(id.toLowerCase(Locale.ROOT));
}
}
// Collect the ids the user asked to remove.
Set<String> removeIds = new HashSet<>();
for (Map.Entry<String, UserAssignedIdentity> entrySet
: this.userAssignedIdentities.entrySet()) {
if (entrySet.getValue() == null) {
removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
}
}
// Equal sizes plus containment means the two sets are identical.
boolean removeAllCurrentIds =
currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
if (removeAllCurrentIds) {
// Downgrade the identity type now that no user-assigned identity remains.
if (currentIdentity == null || currentIdentity.type() == null) {
storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
} else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
} else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
storageAccount.updateParameters.identity().withType(IdentityType.NONE);
}
storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
return true;
} else {
// Removals requested but the payload has no identities recorded yet:
// clear the identity wholesale.
if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
return true;
}
}
}
}
return false;
}
/**
* Initialize storage account's identity property.
*
* @param identityType the identity type to set
*/
private void initStorageAccountIdentity(IdentityType identityType) {
if (!identityType.equals(IdentityType.USER_ASSIGNED)
&& !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
}
if (storageAccount.isInCreateMode()) {
if (storageAccount.createParameters.identity() == null
|| storageAccount.createParameters.identity().type() == null
|| storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
|| storageAccount.createParameters.identity().type().equals(identityType)) {
storageAccount.createParameters.withIdentity(new Identity().withType(identityType));
} else {
storageAccount.createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED));
}
} else {
if (storageAccount.updateParameters.identity() == null
|| storageAccount.updateParameters.identity().type() == null
|| storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
|| storageAccount.updateParameters.identity().type().equals(identityType)) {
Identity identity = Objects.isNull(storageAccount.updateParameters.identity()) ? new Identity().withType(identityType) : storageAccount.updateParameters.identity().withType(identityType);
storageAccount.updateParameters.withIdentity(identity);
} else {
storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
}
}
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
private final StorageAccountImpl storageAccount;
private List<String> creatableIdentityKeys;
private Map<String, UserAssignedIdentity> userAssignedIdentities;
private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
/**
* Creates StorageAccountMsiHandler.
*
* @param authorizationManager the graph rbac manager
* @param storageAccount the storage account to which MSI extension needs to be installed and for which role
* assignments needs to be created
*/
StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
this.storageAccount = storageAccount;
this.creatableIdentityKeys = new ArrayList<>();
this.userAssignedIdentities = new HashMap<>();
}
/**
* Specifies that Local Managed Service Identity needs to be enabled in the storage account. If MSI extension is not
* already installed then it will be installed with access token port as 50342.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withLocalManagedServiceIdentity() {
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, false);
return this;
}
/**
* Specifies that Local Managed Service Identity needs to be disabled in the storage account.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.USER_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, true);
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param creatableIdentity yet-to-be-created identity to be associated with the storage account
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
Objects.requireNonNull(dependency);
this.storageAccount.taskGroup().addDependency(dependency);
this.creatableIdentityKeys.add(creatableIdentity.key());
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param identity an identity to associate
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
return this;
}
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
void handleExternalIdentities() {
if (storageAccount.isInCreateMode()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
}
} else {
if (!this.handleRemoveAllExternalIdentitiesCase()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
} else {
if (this.storageAccount.updateParameters.identity() != null) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
}
}
}
}
}
/** Clear StorageAccountMsiHandler post-run specific internal state. */
void clear() {
this.userAssignedIdentities = new HashMap<>();
}
/**
* Method that handles the case where the user request indicates that all they want to do is remove all associated identities.
*
* @return true if the user intends to remove all the identities
*/
private boolean handleRemoveAllExternalIdentitiesCase() {
if (!this.userAssignedIdentities.isEmpty()) {
int rmCount = 0;
for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
if (v == null) {
rmCount++;
} else {
break;
}
}
boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
if (containsRemoveOnly) {
Set<String> currentIds = new HashSet<>();
Identity currentIdentity = this.storageAccount.updateParameters.identity();
if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
for (String id : currentIdentity.userAssignedIdentities().keySet()) {
currentIds.add(id.toLowerCase(Locale.ROOT));
}
}
Set<String> removeIds = new HashSet<>();
for (Map.Entry<String, UserAssignedIdentity> entrySet
: this.userAssignedIdentities.entrySet()) {
if (entrySet.getValue() == null) {
removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
}
}
boolean removeAllCurrentIds =
currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
if (removeAllCurrentIds) {
if (currentIdentity == null || currentIdentity.type() == null) {
this.storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
} else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
} else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
}
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
return true;
} else {
if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
this.storageAccount.updateParameters.withIdentity(
new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
return true;
}
}
}
}
return false;
}
/**
 * Initializes the identity property on the create or update payload for the given
 * identity type, merging with any identity type that is already requested.
 *
 * @param identityType the identity type to set; only {@code IdentityType.SYSTEM_ASSIGNED}
 *     and {@code IdentityType.USER_ASSIGNED} are accepted
 * @param isWithout true when the caller is removing an identity of the given type,
 *     false when it is adding one (was a boxed {@code Boolean}; narrowed to the
 *     primitive to avoid pointless boxing and an unboxing NPE - all call sites
 *     pass literals, so the change is source-compatible)
 * @throws IllegalArgumentException if {@code identityType} is not supported
 */
private void initStorageAccountIdentity(IdentityType identityType, boolean isWithout) {
    if (!identityType.equals(IdentityType.USER_ASSIGNED)
        && !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
        throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
    }
    if (this.storageAccount.isInCreateMode()) {
        // Create mode: build up the identity on the create payload.
        if (Objects.isNull(this.storageAccount.createParameters.identity())) {
            this.storageAccount.createParameters.withIdentity(new Identity());
        }
        if (Objects.isNull(this.storageAccount.createParameters.identity().type())
            || this.storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
            || this.storageAccount.createParameters.identity().type().equals(identityType)) {
            this.storageAccount.createParameters.identity().withType(identityType);
        } else {
            // The other identity flavor was already requested: combine both.
            this.storageAccount.createParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
        }
    } else {
        // Update mode: seed the payload from the current (inner) identity state.
        if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
            this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
        }
        if (isWithout) {
            // Removal: only the system-assigned flavor changes the type here;
            // user-assigned removals are handled via the identities map.
            if (IdentityType.SYSTEM_ASSIGNED.equals(identityType)) {
                if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.updateParameters.identity().type())) {
                    this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
                } else {
                    this.storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
                }
            }
        } else {
            // The identity may still be null when the inner model had none.
            if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
                this.storageAccount.updateParameters.withIdentity(new Identity());
            }
            if (Objects.isNull(this.storageAccount.updateParameters.identity().type())
                || this.storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
                || this.storageAccount.updateParameters.identity().type().equals(identityType)) {
                this.storageAccount.updateParameters.identity().withType(identityType);
            } else {
                this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
            }
        }
    }
}
} |
Do NOT force-push on a PR that is already in review. I cannot see what the change is in this... | public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters().withIdentity(this.innerModel().identity());
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | updateParameters = new StorageAccountUpdateParameters().withIdentity(this.innerModel().identity()); | public StorageAccountImpl update() {
// Entering update (PATCH) mode: the create payload must no longer be used.
createParameters = null;
// Start from an empty update payload; only explicitly changed properties are sent.
updateParameters = new StorageAccountUpdateParameters();
// Re-bind the helpers to the update payload plus the current inner state.
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
    // Snapshot of the attached user-assigned identity resource ids; an
    // unmodifiable empty set is returned when none are attached.
    Identity identity = innerModel().identity();
    if (identity == null || identity.userAssignedIdentities() == null) {
        return Collections.unmodifiableSet(new HashSet<String>());
    }
    return Collections.unmodifiableSet(new HashSet<String>(identity.userAssignedIdentities().keySet()));
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    // Rejection is expressed by PUTting a connection whose service-connection
    // state is REJECTED; the response body is discarded.
    PrivateLinkServiceConnectionState rejectedState = new PrivateLinkServiceConnectionState()
        .withStatus(PrivateEndpointServiceConnectionStatus.REJECTED);
    PrivateEndpointConnectionInner connection =
        new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(rejectedState);
    return this.manager()
        .serviceClient()
        .getPrivateEndpointConnections()
        .putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName, connection)
        .then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
    // After re-fetching the inner model, drop the cached derived views
    // (account statuses, public endpoints) so they are rebuilt lazily.
    return super.refreshAsync().map(storageAccount -> {
        StorageAccountImpl refreshed = (StorageAccountImpl) storageAccount;
        refreshed.clearWrapperProperties();
        return refreshed;
    });
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
// Invalidates the lazily-built wrapper views derived from the inner model so
// that accountStatuses()/endPoints() recompute them after a create/update/refresh.
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
/**
 * Specifies the custom domain for the storage account.
 * <p>
 * Routed to the create payload in create mode, otherwise to the update payload.
 *
 * @param customDomain the custom domain to assign
 * @return this fluent object
 */
@Override
// Fix: the original carried a duplicated @Override annotation, which is a
// compile error (an annotation may appear at most once per declaration).
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
// Access tier is only mutable on BlobStorage-kind accounts; reject the
// update for any other kind with a logged UnsupportedOperationException.
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
/**
 * Restricts the storage account to accept HTTPS traffic only.
 *
 * @return this fluent object
 */
@Override
// Consistency fix: every sibling fluent setter in this class carries @Override
// for the interface method it implements; this one was missing it.
public StorageAccountImpl withOnlyHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
// Finalize helper-managed sections of the create payload before sending it.
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
// Resolve any user-assigned identities created as part of this definition,
// then fold all identity settings into the create parameters.
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
// After the long-running create completes, re-GET the account so the local
// inner model reflects server-populated properties, then drop cached views.
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
// Finalize helper-managed sections of the update payload before sending it.
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
// Resolve newly created user-assigned identities and fold identity settings
// into the update parameters.
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
// Map the service response back onto this fluent model and invalidate
// cached derived views.
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
/**
 * Enables or disables Azure Files identity-based authentication via
 * Azure AD Domain Services (AADDS).
 * <p>
 * In create mode the setting is only written when {@code enabled} is true
 * (the service default is no directory service). In update mode the
 * directory-service option is explicitly set to AADDS or NONE.
 *
 * @param enabled whether AADDS-based authentication should be enabled
 * @return this fluent object
 */
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    if (isInCreateMode()) {
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // Bug fix: the original null-checked and initialized
        // createParameters.azureFilesIdentityBasedAuthentication() here, but the
        // code below reads the setting from updateParameters — so in update mode
        // it always dereferenced a null and threw NullPointerException. The
        // guard must target updateParameters.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        if (enabled) {
            this.updateParameters
                .azureFilesIdentityBasedAuthentication()
                .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
        } else {
            this.updateParameters
                .azureFilesIdentityBasedAuthentication()
                .withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
        }
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
// NOTE(review): this setting is only applied in create mode; in update mode
// the call is silently ignored. Presumably large-file-shares state cannot be
// changed after creation — confirm against the service contract.
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
// Immutable read-only adapter exposing the service's private-link resource
// model through the fluent PrivateLinkResource interface.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
// Unmodifiable view; NOTE(review): throws NullPointerException if the
// service omits requiredMembers — confirm the field is always populated.
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
// Unmodifiable view; same nullability caveat as requiredMemberNames().
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
// Supplies the MSI principal id and the resource id of this storage account to
// RoleAssignmentHelper. Values are read lazily from the current inner model so
// they become available once the account (and its identity) is provisioned.
RoleAssignmentHelper.IdProvider idProvider() {
return new RoleAssignmentHelper.IdProvider() {
@Override
public String principalId() {
// Null until the account has a system-assigned identity.
if (innerModel() != null && innerModel().identity() != null) {
return innerModel().identity().principalId();
} else {
return null;
}
}
@Override
public String resourceId() {
// Null until the account has been created and has an ARM id.
if (innerModel() != null) {
return innerModel().id();
} else {
return null;
}
}
};
}
// Immutable adapter converting the service's private-endpoint-connection inner
// model to the resources-fluentcore model types. All derived fields are
// computed eagerly in the constructor; each is null when the corresponding
// inner field is absent.
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
// Translate the storage-specific connection-state enum/fields into the
// fluentcore equivalents, tolerating a missing status.
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
StorageAccountCreateParameters createParameters;
StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
private StorageAccountMsiHandler storageAccountMsiHandler;
private final AuthorizationManager authorizationManager;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager, final AuthorizationManager authorizationManager) {
super(name, innerModel, storageManager);
this.authorizationManager = authorizationManager;
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
this.storageAccountMsiHandler = new StorageAccountMsiHandler(authorizationManager, this);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
if (innerModel().identity() != null
&& innerModel().identity().userAssignedIdentities() != null) {
return Collections
.unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
}
return Collections.unmodifiableSet(new HashSet<String>());
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
/**
 * Specifies the custom domain for the storage account.
 * <p>
 * Routed to the create payload in create mode, otherwise to the update payload.
 *
 * @param customDomain the custom domain to assign
 * @return this fluent object
 */
@Override
// Fix: the original carried a duplicated @Override annotation, which is a
// compile error (an annotation may appear at most once per declaration).
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withLocalManagedServiceIdentity();
return this;
}
@Override
public StorageAccountImpl withoutSystemAssignedManagedServiceIdentity() {
this.storageAccountMsiHandler.withoutLocalManagedServiceIdentity();
return this;
}
/**
 * Restricts the storage account to accept HTTPS traffic only.
 *
 * @return this fluent object
 */
@Override
// Consistency fix: every sibling fluent setter in this class carries @Override
// for the interface method it implements; this one was missing it.
public StorageAccountImpl withOnlyHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
// ---- Network-rules fluent setters -------------------------------------------------
// Each method below is a thin delegator: it records the requested rule change on
// networkRulesHelper (which builds the final network rule set at create/update time)
// and returns this for fluent chaining. Note two naming mismatches between the public
// API and the helper, called out inline.
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
// Public name says "LogEntries"; the helper calls the same concept "Logging".
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
// Public name drops the "Allowed" present in the helper's method name.
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
// The following removal/upgrade operations are declared with the narrower Update
// return type: they are only part of the update-stage fluent interface.
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
// Account-kind upgrade is expressed directly on the update payload, not via a helper.
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
    // Finalize the create payload: network rules, location/tags, and MSI bookkeeping.
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    this.storageAccountMsiHandler.processCreatedExternalIdentities();
    this.storageAccountMsiHandler.handleExternalIdentities();
    this.storageAccountMsiHandler.clear();
    // DRY fix: the original fetched this client into a local and then rebuilt the
    // identical manager().serviceClient().getStorageAccounts() chain for createAsync;
    // reuse the single local for both calls.
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    return client
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        // Re-read the account after creation before mapping it into this fluent model
        // (presumably the create response is not fully populated — confirm).
        .flatMap(storageAccountInner ->
            client
                .getByResourceGroupAsync(resourceGroupName(), this.name())
                .map(innerToFluentMap(this))
                .doOnNext(storageAccount -> clearWrapperProperties()));
}
// Applies the accumulated update payload to the service and maps the response back
// into this fluent model. Mirrors createResourceAsync, minus location (immutable)
// and minus the post-create re-read.
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
// MSI bookkeeping: resolve freshly created identities, fold them into the payload,
// then reset the handler's per-run state.
this.storageAccountMsiHandler.processCreatedExternalIdentities();
this.storageAccountMsiHandler.handleExternalIdentities();
this.storageAccountMsiHandler.clear();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Configures Azure Files identity-based authentication (AADDS directory service).
    if (isInCreateMode()) {
        // In create mode only enabling is acted upon; enabled=false is a no-op,
        // matching the original behavior.
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: the original checked and seeded createParameters here, then
        // dereferenced updateParameters.azureFilesIdentityBasedAuthentication() below —
        // an NPE whenever the update payload had no authentication object yet.
        // The update payload is the one mutated, so it is the one to initialize.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        DirectoryServiceOptions options =
            enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE;
        this.updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(options);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
    // Only the create payload is touched; in update mode this call is a no-op,
    // unchanged from the original implementation.
    if (isInCreateMode()) {
        LargeFileSharesState state =
            enabled ? LargeFileSharesState.ENABLED : LargeFileSharesState.DISABLED;
        this.createParameters.withLargeFileSharesState(state);
    }
    return this;
}
// NOTE(review): writes the create payload unconditionally — presumably HNS can only be
// chosen at account creation; confirm that this setter is never reachable in update mode.
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
// The three identity setters below delegate all bookkeeping (payload mutation,
// task-group dependencies) to storageAccountMsiHandler.
@Override
public StorageAccountImpl withNewUserAssignedManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.storageAccountMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
return this;
}
@Override
public StorageAccountImpl withExistingUserAssignedManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.storageAccountMsiHandler.withExistingExternalManagedServiceIdentity(identity);
return this;
}
@Override
public StorageAccountImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
this.storageAccountMsiHandler.withoutExternalManagedServiceIdentity(identityId);
return this;
}
// Immutable adapter exposing a storage-model PrivateLinkResource through the
// fluent-core PrivateLinkResource interface.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
// NOTE(review): Collections.unmodifiableList throws NPE when handed null; this
// assumes the service always populates requiredMembers — confirm against callers.
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
// Same non-null assumption as requiredMemberNames, for requiredZoneNames.
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
RoleAssignmentHelper.IdProvider idProvider() {
    // Lazily reads identity/resource ids from the current inner model each time the
    // role-assignment helper asks for them; null when the data is not yet available.
    return new RoleAssignmentHelper.IdProvider() {
        @Override
        public String principalId() {
            boolean hasIdentity = innerModel() != null && innerModel().identity() != null;
            return hasIdentity ? innerModel().identity().principalId() : null;
        }
        @Override
        public String resourceId() {
            return innerModel() == null ? null : innerModel().id();
        }
    };
}
// Immutable adapter exposing a PrivateEndpointConnectionInner through the fluent-core
// PrivateEndpointConnection interface. All conversions from inner-model types to
// fluent-core types happen eagerly in the constructor; each level of the inner model
// may be null and is tolerated.
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
// Wrap the endpoint id, when present, into the fluent-core PrivateEndpoint type.
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
// Re-express the connection state (status enum round-tripped via its string form,
// since the inner and fluent-core enums are distinct types).
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
// Provisioning state is likewise round-tripped through its string representation.
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
This line seems still better to check `storageAccount.updateParameters.identity().type()`? | private void initStorageAccountIdentity(IdentityType identityType, Boolean isWithout) {
if (!identityType.equals(IdentityType.USER_ASSIGNED)
&& !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
}
if (!isWithout) {
if (this.storageAccount.isInCreateMode()) {
if (Objects.isNull(this.storageAccount.createParameters.identity())) {
this.storageAccount.createParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.createParameters.identity().type())
|| this.storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.createParameters.identity().type().equals(identityType)) {
this.storageAccount.createParameters.identity().withType(identityType);
} else {
this.storageAccount.createParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity().type())
|| this.storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.updateParameters.identity().type().equals(identityType)) {
this.storageAccount.updateParameters.identity().withType(identityType);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (IdentityType.SYSTEM_ASSIGNED.equals(identityType)) {
if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
}
}
}
} | if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) { | private void initStorageAccountIdentity(IdentityType identityType, Boolean isWithout) {
if (!identityType.equals(IdentityType.USER_ASSIGNED)
&& !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
}
if (this.storageAccount.isInCreateMode()) {
if (Objects.isNull(this.storageAccount.createParameters.identity())) {
this.storageAccount.createParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.createParameters.identity().type())
|| this.storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.createParameters.identity().type().equals(identityType)) {
this.storageAccount.createParameters.identity().withType(identityType);
} else {
this.storageAccount.createParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (isWithout) {
if (IdentityType.SYSTEM_ASSIGNED.equals(identityType)) {
if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.updateParameters.identity().type())) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
}
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity().type())
|| this.storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.updateParameters.identity().type().equals(identityType)) {
this.storageAccount.updateParameters.identity().withType(identityType);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
}
}
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
private final StorageAccountImpl storageAccount;
private List<String> creatableIdentityKeys;
private Map<String, UserAssignedIdentity> userAssignedIdentities;
private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
/**
* Creates StorageAccountMsiHandler.
*
* @param authorizationManager the graph rbac manager
* @param storageAccount the storage account to which MSI extension needs to be installed and for which role
* assignments needs to be created
*/
StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
this.storageAccount = storageAccount;
this.creatableIdentityKeys = new ArrayList<>();
this.userAssignedIdentities = new HashMap<>();
}
/**
* Specifies that Local Managed Service Identity needs to be enabled in the storage account. If MSI extension is not
* already installed then it will be installed with access token port as 50342.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withLocalManagedServiceIdentity() {
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, false);
return this;
}
/**
 * Specifies that the Local (system-assigned) Managed Service Identity needs to be
 * disabled in the storage account.
 *
 * @return StorageAccountMsiHandler
 */
StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
    Identity current = this.storageAccount.innerModel().identity();
    // Only act when a system-assigned identity actually exists on the account.
    boolean hasSystemAssigned = current != null
        && current.type() != null
        && !IdentityType.NONE.equals(current.type())
        && !IdentityType.USER_ASSIGNED.equals(current.type());
    if (hasSystemAssigned) {
        this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, true);
    }
    return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param creatableIdentity yet-to-be-created identity to be associated with the storage account
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
// Chain the identity creation into this account's task group so it is provisioned
// first; the key is resolved to a concrete id in processCreatedExternalIdentities.
TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
Objects.requireNonNull(dependency);
this.storageAccount.taskGroup().addDependency(dependency);
this.creatableIdentityKeys.add(creatableIdentity.key());
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param identity an identity to associate
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
return this;
}
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
// No-op when the account has no user-assigned identity to remove.
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, true);
// A null map value is the removal marker, consumed by handleRemoveAllExternalIdentitiesCase.
this.userAssignedIdentities.put(identityId, null);
return this;
}
void processCreatedExternalIdentities() {
    // Resolve every identity that was provisioned by this task-group run: look up each
    // recorded key's task result and register the resulting identity id for assignment.
    for (String creatableKey : this.creatableIdentityKeys) {
        Object taskResult = this.storageAccount.taskGroup().taskResult(creatableKey);
        com.azure.resourcemanager.msi.models.Identity resolved =
            (com.azure.resourcemanager.msi.models.Identity) taskResult;
        Objects.requireNonNull(resolved);
        this.userAssignedIdentities.put(resolved.id(), new UserAssignedIdentity());
    }
    // All pending keys are now consumed.
    this.creatableIdentityKeys.clear();
}
void handleExternalIdentities() {
    // Create flow: attach any collected user-assigned identities to the create payload.
    if (storageAccount.isInCreateMode()) {
        if (!this.userAssignedIdentities.isEmpty()) {
            this.storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
        }
        return;
    }
    // Update flow: a recognized "remove every identity" request is handled entirely
    // inside handleRemoveAllExternalIdentitiesCase.
    if (this.handleRemoveAllExternalIdentitiesCase()) {
        return;
    }
    if (!this.userAssignedIdentities.isEmpty()) {
        this.storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
    } else if (this.storageAccount.updateParameters.identity() != null) {
        // No per-identity changes requested: clear the user-assigned map on the payload,
        // matching the original behavior.
        this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
    }
}
/** Clear StorageAccountMsiHandler post-run specific internal state. */
void clear() {
// Replace the map rather than clearing it in place: the previous instance may still be
// referenced by the identity payload handed to the service in handleExternalIdentities.
this.userAssignedIdentities = new HashMap<>();
}
/**
* Handles the case where the user's request indicates that all they want to do is
* remove user-assigned identities currently associated with the storage account.
*
* @return true if the request was recognized as removal-only and the update payload was adjusted
*/
private boolean handleRemoveAllExternalIdentitiesCase() {
if (!this.userAssignedIdentities.isEmpty()) {
// Count leading removal markers (null values); the break means the count equals the
// map size only when EVERY entry is a removal marker, i.e. nothing is being added.
int rmCount = 0;
for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
if (v == null) {
rmCount++;
} else {
break;
}
}
boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
if (containsRemoveOnly) {
// Identity ids currently on the update payload, lower-cased so that the
// comparison with the requested removals is case-insensitive.
Set<String> currentIds = new HashSet<>();
Identity currentIdentity = this.storageAccount.updateParameters.identity();
if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
for (String id : currentIdentity.userAssignedIdentities().keySet()) {
currentIds.add(id.toLowerCase(Locale.ROOT));
}
}
// Ids the user asked to remove (the null-valued entries).
Set<String> removeIds = new HashSet<>();
for (Map.Entry<String, UserAssignedIdentity> entrySet
: this.userAssignedIdentities.entrySet()) {
if (entrySet.getValue() == null) {
removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
}
}
// True when the removal set exactly covers the currently assigned set.
boolean removeAllCurrentIds =
currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
if (removeAllCurrentIds) {
// Downgrade the identity type: drop the USER_ASSIGNED component, keeping
// SYSTEM_ASSIGNED when it was part of the combined type.
if (currentIdentity == null || currentIdentity.type() == null) {
this.storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
} else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
} else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
}
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
return true;
} else {
// Removal requested but the payload carries no identity info at all:
// emit an explicit NONE identity so the service clears everything.
if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
this.storageAccount.updateParameters.withIdentity(
new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
return true;
}
}
}
}
return false;
}
/**
* Initialize storage account's identity property.
*
* @param identityType the identity type to set
*/
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
private final StorageAccountImpl storageAccount;
private List<String> creatableIdentityKeys;
private Map<String, UserAssignedIdentity> userAssignedIdentities;
private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
/**
* Creates StorageAccountMsiHandler.
*
* @param authorizationManager the graph rbac manager
* @param storageAccount the storage account to which MSI extension needs to be installed and for which role
* assignments needs to be created
*/
StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
this.storageAccount = storageAccount;
this.creatableIdentityKeys = new ArrayList<>();
this.userAssignedIdentities = new HashMap<>();
}
/**
* Specifies that Local Managed Service Identity needs to be enabled in the storage account. If MSI extension is not
* already installed then it will be installed with access token port as 50342.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withLocalManagedServiceIdentity() {
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, false);
return this;
}
/**
* Specifies that Local Managed Service Identity needs to be disabled in the storage account.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.USER_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, true);
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param creatableIdentity yet-to-be-created identity to be associated with the storage account
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
Objects.requireNonNull(dependency);
this.storageAccount.taskGroup().addDependency(dependency);
this.creatableIdentityKeys.add(creatableIdentity.key());
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param identity an identity to associate
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
return this;
}
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, true);
this.userAssignedIdentities.put(identityId, null);
return this;
}
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
void handleExternalIdentities() {
if (storageAccount.isInCreateMode()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
}
} else {
if (!this.handleRemoveAllExternalIdentitiesCase()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
} else {
if (this.storageAccount.updateParameters.identity() != null) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
}
}
}
}
}
/** Clear StorageAccountMsiHandler post-run specific internal state. */
void clear() {
this.userAssignedIdentities = new HashMap<>();
}
/**
* Method that handles the case where the user request indicates that all they want to do is remove all associated identities.
*
* @return true if user intends to remove all the identities
*/
private boolean handleRemoveAllExternalIdentitiesCase() {
if (!this.userAssignedIdentities.isEmpty()) {
int rmCount = 0;
for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
if (v == null) {
rmCount++;
} else {
break;
}
}
boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
if (containsRemoveOnly) {
Set<String> currentIds = new HashSet<>();
Identity currentIdentity = this.storageAccount.updateParameters.identity();
if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
for (String id : currentIdentity.userAssignedIdentities().keySet()) {
currentIds.add(id.toLowerCase(Locale.ROOT));
}
}
Set<String> removeIds = new HashSet<>();
for (Map.Entry<String, UserAssignedIdentity> entrySet
: this.userAssignedIdentities.entrySet()) {
if (entrySet.getValue() == null) {
removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
}
}
boolean removeAllCurrentIds =
currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
if (removeAllCurrentIds) {
if (currentIdentity == null || currentIdentity.type() == null) {
this.storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
} else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
} else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
}
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
return true;
} else {
if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
this.storageAccount.updateParameters.withIdentity(
new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
return true;
}
}
}
}
return false;
}
/**
* Initialize storage account's identity property.
*
* @param identityType the identity type to set
*/
} |
`if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type()))` corrected to `if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.updateParameters.identity().type()))` | private void initStorageAccountIdentity(IdentityType identityType, Boolean isWithout) {
if (!identityType.equals(IdentityType.USER_ASSIGNED)
&& !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
}
if (!isWithout) {
if (this.storageAccount.isInCreateMode()) {
if (Objects.isNull(this.storageAccount.createParameters.identity())) {
this.storageAccount.createParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.createParameters.identity().type())
|| this.storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.createParameters.identity().type().equals(identityType)) {
this.storageAccount.createParameters.identity().withType(identityType);
} else {
this.storageAccount.createParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity().type())
|| this.storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.updateParameters.identity().type().equals(identityType)) {
this.storageAccount.updateParameters.identity().withType(identityType);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (IdentityType.SYSTEM_ASSIGNED.equals(identityType)) {
if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
}
}
}
} | if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) { | private void initStorageAccountIdentity(IdentityType identityType, Boolean isWithout) {
if (!identityType.equals(IdentityType.USER_ASSIGNED)
&& !identityType.equals(IdentityType.SYSTEM_ASSIGNED)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Invalid argument: " + identityType));
}
if (this.storageAccount.isInCreateMode()) {
if (Objects.isNull(this.storageAccount.createParameters.identity())) {
this.storageAccount.createParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.createParameters.identity().type())
|| this.storageAccount.createParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.createParameters.identity().type().equals(identityType)) {
this.storageAccount.createParameters.identity().withType(identityType);
} else {
this.storageAccount.createParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(this.storageAccount.innerModel().identity());
}
if (isWithout) {
if (IdentityType.SYSTEM_ASSIGNED.equals(identityType)) {
if (IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.updateParameters.identity().type())) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.USER_ASSIGNED);
}
}
} else {
if (Objects.isNull(this.storageAccount.updateParameters.identity())) {
this.storageAccount.updateParameters.withIdentity(new Identity());
}
if (Objects.isNull(this.storageAccount.updateParameters.identity().type())
|| this.storageAccount.updateParameters.identity().type().equals(IdentityType.NONE)
|| this.storageAccount.updateParameters.identity().type().equals(identityType)) {
this.storageAccount.updateParameters.identity().withType(identityType);
} else {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED);
}
}
}
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
private final StorageAccountImpl storageAccount;
private List<String> creatableIdentityKeys;
private Map<String, UserAssignedIdentity> userAssignedIdentities;
private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
/**
* Creates StorageAccountMsiHandler.
*
* @param authorizationManager the graph rbac manager
* @param storageAccount the storage account to which MSI extension needs to be installed and for which role
* assignments needs to be created
*/
StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
this.storageAccount = storageAccount;
this.creatableIdentityKeys = new ArrayList<>();
this.userAssignedIdentities = new HashMap<>();
}
/**
* Specifies that Local Managed Service Identity needs to be enabled in the storage account. If MSI extension is not
* already installed then it will be installed with access token port as 50342.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withLocalManagedServiceIdentity() {
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, false);
return this;
}
/**
* Specifies that Local Managed Service Identity needs to be disabled in the storage account.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.USER_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, true);
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param creatableIdentity yet-to-be-created identity to be associated with the storage account
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
Objects.requireNonNull(dependency);
this.storageAccount.taskGroup().addDependency(dependency);
this.creatableIdentityKeys.add(creatableIdentity.key());
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param identity an identity to associate
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
return this;
}
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, true);
this.userAssignedIdentities.put(identityId, null);
return this;
}
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
void handleExternalIdentities() {
if (storageAccount.isInCreateMode()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
}
} else {
if (!this.handleRemoveAllExternalIdentitiesCase()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
} else {
if (this.storageAccount.updateParameters.identity() != null) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
}
}
}
}
}
/** Clear StorageAccountMsiHandler post-run specific internal state. */
void clear() {
this.userAssignedIdentities = new HashMap<>();
}
/**
* Method that handle the case where user request indicates all it want to do is remove all identities associated
* with the storage account.
*
* @return true if user indented to remove all the identities.
*/
private boolean handleRemoveAllExternalIdentitiesCase() {
if (!this.userAssignedIdentities.isEmpty()) {
int rmCount = 0;
for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
if (v == null) {
rmCount++;
} else {
break;
}
}
boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
if (containsRemoveOnly) {
Set<String> currentIds = new HashSet<>();
Identity currentIdentity = this.storageAccount.updateParameters.identity();
if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
for (String id : currentIdentity.userAssignedIdentities().keySet()) {
currentIds.add(id.toLowerCase(Locale.ROOT));
}
}
Set<String> removeIds = new HashSet<>();
for (Map.Entry<String, UserAssignedIdentity> entrySet
: this.userAssignedIdentities.entrySet()) {
if (entrySet.getValue() == null) {
removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
}
}
boolean removeAllCurrentIds =
currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
if (removeAllCurrentIds) {
if (currentIdentity == null || currentIdentity.type() == null) {
this.storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
} else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
} else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
}
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
return true;
} else {
if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
this.storageAccount.updateParameters.withIdentity(
new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
return true;
}
}
}
}
return false;
}
/**
* Initialize storage account's identity property.
*
* @param identityType the identity type to set
*/
} | class StorageAccountMsiHandler extends RoleAssignmentHelper {
private final StorageAccountImpl storageAccount;
private List<String> creatableIdentityKeys;
private Map<String, UserAssignedIdentity> userAssignedIdentities;
private final ClientLogger logger = new ClientLogger(StorageAccountMsiHandler.class);
/**
* Creates StorageAccountMsiHandler.
*
* @param authorizationManager the graph rbac manager
* @param storageAccount the storage account to which MSI extension needs to be installed and for which role
* assignments needs to be created
*/
StorageAccountMsiHandler(final AuthorizationManager authorizationManager, StorageAccountImpl storageAccount) {
super(authorizationManager, storageAccount.taskGroup(), storageAccount.idProvider());
this.storageAccount = storageAccount;
this.creatableIdentityKeys = new ArrayList<>();
this.userAssignedIdentities = new HashMap<>();
}
/**
* Specifies that Local Managed Service Identity needs to be enabled in the storage account. If MSI extension is not
* already installed then it will be installed with access token port as 50342.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withLocalManagedServiceIdentity() {
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, false);
return this;
}
/**
* Specifies that Local Managed Service Identity needs to be disabled in the storage account.
*
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutLocalManagedServiceIdentity() {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.USER_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.SYSTEM_ASSIGNED, true);
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param creatableIdentity yet-to-be-created identity to be associated with the storage account
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withNewExternalManagedServiceIdentity(Creatable<com.azure.resourcemanager.msi.models.Identity> creatableIdentity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
TaskGroup.HasTaskGroup dependency = (TaskGroup.HasTaskGroup) creatableIdentity;
Objects.requireNonNull(dependency);
this.storageAccount.taskGroup().addDependency(dependency);
this.creatableIdentityKeys.add(creatableIdentity.key());
return this;
}
/**
* Specifies that given identity should be set as one of the External Managed Service Identity of the storage
* account.
*
* @param identity an identity to associate
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withExistingExternalManagedServiceIdentity(com.azure.resourcemanager.msi.models.Identity identity) {
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, false);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
return this;
}
/**
* Specifies that given identity should be removed from the list of External Managed Service Identity associated
* with the storage account.
*
* @param identityId resource id of the identity
* @return StorageAccountMsiHandler
*/
StorageAccountMsiHandler withoutExternalManagedServiceIdentity(String identityId) {
if (this.storageAccount.innerModel().identity() == null
|| this.storageAccount.innerModel().identity().type() == null
|| IdentityType.NONE.equals(this.storageAccount.innerModel().identity().type())
|| IdentityType.SYSTEM_ASSIGNED.equals(this.storageAccount.innerModel().identity().type())) {
return this;
}
this.initStorageAccountIdentity(IdentityType.USER_ASSIGNED, true);
this.userAssignedIdentities.put(identityId, null);
return this;
}
void processCreatedExternalIdentities() {
for (String key : this.creatableIdentityKeys) {
com.azure.resourcemanager.msi.models.Identity identity = (com.azure.resourcemanager.msi.models.Identity) this.storageAccount.taskGroup().taskResult(key);
Objects.requireNonNull(identity);
this.userAssignedIdentities.put(identity.id(), new UserAssignedIdentity());
}
this.creatableIdentityKeys.clear();
}
void handleExternalIdentities() {
if (storageAccount.isInCreateMode()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.createParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
}
} else {
if (!this.handleRemoveAllExternalIdentitiesCase()) {
if (!this.userAssignedIdentities.isEmpty()) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(this.userAssignedIdentities);
} else {
if (this.storageAccount.updateParameters.identity() != null) {
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
}
}
}
}
}
/** Clear StorageAccountMsiHandler post-run specific internal state. */
void clear() {
this.userAssignedIdentities = new HashMap<>();
}
/**
* Method that handles the case where user request indicates all he/she wants to do is to remove all associated identities。
*
* @return true if user intends to remove all the identities
*/
private boolean handleRemoveAllExternalIdentitiesCase() {
if (!this.userAssignedIdentities.isEmpty()) {
int rmCount = 0;
for (UserAssignedIdentity v : this.userAssignedIdentities.values()) {
if (v == null) {
rmCount++;
} else {
break;
}
}
boolean containsRemoveOnly = rmCount > 0 && rmCount == this.userAssignedIdentities.size();
if (containsRemoveOnly) {
Set<String> currentIds = new HashSet<>();
Identity currentIdentity = this.storageAccount.updateParameters.identity();
if (currentIdentity != null && currentIdentity.userAssignedIdentities() != null) {
for (String id : currentIdentity.userAssignedIdentities().keySet()) {
currentIds.add(id.toLowerCase(Locale.ROOT));
}
}
Set<String> removeIds = new HashSet<>();
for (Map.Entry<String, UserAssignedIdentity> entrySet
: this.userAssignedIdentities.entrySet()) {
if (entrySet.getValue() == null) {
removeIds.add(entrySet.getKey().toLowerCase(Locale.ROOT));
}
}
boolean removeAllCurrentIds =
currentIds.size() == removeIds.size() && currentIds.containsAll(removeIds);
if (removeAllCurrentIds) {
if (currentIdentity == null || currentIdentity.type() == null) {
this.storageAccount.updateParameters.withIdentity(new Identity().withType(IdentityType.NONE));
} else if (currentIdentity.type().equals(IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.SYSTEM_ASSIGNED);
} else if (currentIdentity.type().equals(IdentityType.USER_ASSIGNED)) {
this.storageAccount.updateParameters.identity().withType(IdentityType.NONE);
}
this.storageAccount.updateParameters.identity().withUserAssignedIdentities(null);
return true;
} else {
if (currentIds.isEmpty() && !removeIds.isEmpty() && currentIdentity == null) {
this.storageAccount.updateParameters.withIdentity(
new Identity().withType(IdentityType.NONE).withUserAssignedIdentities(null));
return true;
}
}
}
}
return false;
}
/**
* Initialize storage account's identity property.
*
* @param identityType the identity type to set
*/
} |
nice! | private RequestBody toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
if (bodyContent == null) {
return EMPTY_REQUEST_BODY;
}
String contentType = headers.getValue(HttpHeaderName.CONTENT_TYPE);
MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
long effectiveContentLength = getRequestContentLength(content, headers);
if (content instanceof FluxByteBufferContent) {
return new OkHttpFluxRequestBody(
content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis());
} else {
return new BinaryDataRequestBody(bodyContent, mediaType, effectiveContentLength);
}
} | return new BinaryDataRequestBody(bodyContent, mediaType, effectiveContentLength); | private RequestBody toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
if (bodyContent == null) {
return EMPTY_REQUEST_BODY;
}
String contentType = headers.getValue(HttpHeaderName.CONTENT_TYPE);
MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);
BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
long effectiveContentLength = getRequestContentLength(content, headers);
if (content instanceof FluxByteBufferContent) {
return new OkHttpFluxRequestBody(
content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis());
} else {
return new BinaryDataRequestBody(bodyContent, mediaType, effectiveContentLength);
}
} | class OkHttpAsyncHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
private static final String AZURE_IGNORE_RESPONSE_BODY = "azure-ignore-response-body";
private static final String AZURE_EAGERLY_CONVERT_HEADERS = "azure-eagerly-convert-headers";
final OkHttpClient httpClient;
OkHttpAsyncHttpClient(OkHttpClient httpClient) {
this.httpClient = httpClient;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders = (boolean) context.getData(AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
return Mono.create(sink -> sink.onRequest(value -> {
Mono.fromCallable(() -> toOkHttpRequest(request, progressReporter))
.subscribe(okHttpRequest -> {
try {
Call call = httpClient.newCall(okHttpRequest);
call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders));
sink.onCancel(call::cancel);
} catch (Exception ex) {
sink.error(ex);
}
}, sink::error);
}));
}
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders = (boolean) context.getData(AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
Request okHttpRequest = toOkHttpRequest(request, progressReporter);
try {
Response okHttpResponse = httpClient.newCall(okHttpRequest).execute();
return toHttpResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
/**
* Converts the given azure-core request to okhttp request.
*
* @param request the azure-core request
* @param progressReporter the {@link ProgressReporter}. Can be null.
* @return the okhttp request
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request, ProgressReporter progressReporter) {
Request.Builder requestBuilder = new Request.Builder()
.url(request.getUrl());
if (request.getHeaders() != null) {
for (HttpHeader hdr : request.getHeaders()) {
hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders());
if (progressReporter != null) {
okHttpRequestBody = new OkHttpProgressReportingRequestBody(okHttpRequestBody, progressReporter);
}
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody)
.build();
}
/**
* Create a Mono of okhttp3.RequestBody from the given BinaryData.
*
* @param bodyContent The request body content
* @param headers the headers associated with the original request
* @return the Mono emitting okhttp request
*/
private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) {
Long contentLength = content.getLength();
if (contentLength == null) {
String contentLengthHeaderValue = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLengthHeaderValue != null) {
contentLength = Long.parseLong(contentLengthHeaderValue);
} else {
contentLength = -1L;
}
}
return contentLength;
}
private static HttpResponse toHttpResponse(HttpRequest request, okhttp3.Response response,
boolean eagerlyReadResponse, boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
/*
* Use a buffered response when we are eagerly reading the response from the network and the body isn't
* empty.
*/
if (eagerlyReadResponse || ignoreResponseBody) {
try (ResponseBody body = response.body()) {
byte[] bytes = (body != null) ? body.bytes() : EMPTY_BODY;
return new OkHttpAsyncBufferedResponse(response, request, bytes, eagerlyConvertHeaders);
}
} else {
return new OkHttpAsyncResponse(response, request, eagerlyConvertHeaders);
}
}
private static class OkHttpCallback implements okhttp3.Callback {
private final MonoSink<HttpResponse> sink;
private final HttpRequest request;
private final boolean eagerlyReadResponse;
private final boolean ignoreResponseBody;
private final boolean eagerlyConvertHeaders;
OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse,
boolean ignoreResponseBody, boolean eagerlyConvertHeaders) {
this.sink = sink;
this.request = request;
this.eagerlyReadResponse = eagerlyReadResponse;
this.ignoreResponseBody = ignoreResponseBody;
this.eagerlyConvertHeaders = eagerlyConvertHeaders;
}
@SuppressWarnings("NullableProblems")
@Override
public void onFailure(okhttp3.Call call, IOException e) {
if (e.getSuppressed().length == 1) {
sink.error(e.getSuppressed()[0]);
} else {
sink.error(e);
}
}
@SuppressWarnings("NullableProblems")
@Override
public void onResponse(okhttp3.Call call, okhttp3.Response response) {
try {
sink.success(toHttpResponse(request, response, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders));
} catch (IOException ex) {
sink.error(ex);
}
}
}
} | class OkHttpAsyncHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(OkHttpAsyncHttpClient.class);
private static final byte[] EMPTY_BODY = new byte[0];
private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY);
private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
private static final String AZURE_IGNORE_RESPONSE_BODY = "azure-ignore-response-body";
private static final String AZURE_EAGERLY_CONVERT_HEADERS = "azure-eagerly-convert-headers";
final OkHttpClient httpClient;
OkHttpAsyncHttpClient(OkHttpClient httpClient) {
this.httpClient = httpClient;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
return send(request, Context.NONE);
}
@Override
public Mono<HttpResponse> send(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders = (boolean) context.getData(AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
return Mono.create(sink -> sink.onRequest(value -> {
Mono.fromCallable(() -> toOkHttpRequest(request, progressReporter))
.subscribe(okHttpRequest -> {
try {
Call call = httpClient.newCall(okHttpRequest);
call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders));
sink.onCancel(call::cancel);
} catch (Exception ex) {
sink.error(ex);
}
}, sink::error);
}));
}
@Override
public HttpResponse sendSync(HttpRequest request, Context context) {
boolean eagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
boolean ignoreResponseBody = (boolean) context.getData(AZURE_IGNORE_RESPONSE_BODY).orElse(false);
boolean eagerlyConvertHeaders = (boolean) context.getData(AZURE_EAGERLY_CONVERT_HEADERS).orElse(false);
ProgressReporter progressReporter = Contexts.with(context).getHttpRequestProgressReporter();
Request okHttpRequest = toOkHttpRequest(request, progressReporter);
try {
Response okHttpResponse = httpClient.newCall(okHttpRequest).execute();
return toHttpResponse(request, okHttpResponse, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
/**
* Converts the given azure-core request to okhttp request.
*
* @param request the azure-core request
* @param progressReporter the {@link ProgressReporter}. Can be null.
* @return the okhttp request
*/
private okhttp3.Request toOkHttpRequest(HttpRequest request, ProgressReporter progressReporter) {
Request.Builder requestBuilder = new Request.Builder()
.url(request.getUrl());
if (request.getHeaders() != null) {
for (HttpHeader hdr : request.getHeaders()) {
hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value));
}
}
if (request.getHttpMethod() == HttpMethod.GET) {
return requestBuilder.get().build();
} else if (request.getHttpMethod() == HttpMethod.HEAD) {
return requestBuilder.head().build();
}
RequestBody okHttpRequestBody = toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders());
if (progressReporter != null) {
okHttpRequestBody = new OkHttpProgressReportingRequestBody(okHttpRequestBody, progressReporter);
}
return requestBuilder.method(request.getHttpMethod().toString(), okHttpRequestBody)
.build();
}
/**
* Create a Mono of okhttp3.RequestBody from the given BinaryData.
*
* @param bodyContent The request body content
* @param headers the headers associated with the original request
* @return the Mono emitting okhttp request
*/
private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) {
Long contentLength = content.getLength();
if (contentLength == null) {
String contentLengthHeaderValue = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
if (contentLengthHeaderValue != null) {
contentLength = Long.parseLong(contentLengthHeaderValue);
} else {
contentLength = -1L;
}
}
return contentLength;
}
private static HttpResponse toHttpResponse(HttpRequest request, okhttp3.Response response,
boolean eagerlyReadResponse, boolean ignoreResponseBody, boolean eagerlyConvertHeaders) throws IOException {
/*
* Use a buffered response when we are eagerly reading the response from the network and the body isn't
* empty.
*/
if (eagerlyReadResponse || ignoreResponseBody) {
try (ResponseBody body = response.body()) {
byte[] bytes = (body != null) ? body.bytes() : EMPTY_BODY;
return new OkHttpAsyncBufferedResponse(response, request, bytes, eagerlyConvertHeaders);
}
} else {
return new OkHttpAsyncResponse(response, request, eagerlyConvertHeaders);
}
}
private static class OkHttpCallback implements okhttp3.Callback {
private final MonoSink<HttpResponse> sink;
private final HttpRequest request;
private final boolean eagerlyReadResponse;
private final boolean ignoreResponseBody;
private final boolean eagerlyConvertHeaders;
OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse,
boolean ignoreResponseBody, boolean eagerlyConvertHeaders) {
this.sink = sink;
this.request = request;
this.eagerlyReadResponse = eagerlyReadResponse;
this.ignoreResponseBody = ignoreResponseBody;
this.eagerlyConvertHeaders = eagerlyConvertHeaders;
}
@SuppressWarnings("NullableProblems")
@Override
public void onFailure(okhttp3.Call call, IOException e) {
if (e.getSuppressed().length == 1) {
sink.error(e.getSuppressed()[0]);
} else {
sink.error(e);
}
}
@SuppressWarnings("NullableProblems")
@Override
public void onResponse(okhttp3.Call call, okhttp3.Response response) {
try {
sink.success(toHttpResponse(request, response, eagerlyReadResponse, ignoreResponseBody,
eagerlyConvertHeaders));
} catch (IOException ex) {
sink.error(ex);
}
}
}
} |
nit and future: maybe have an util method for it? I might be wrong, but I think we have a few places where we copy input to output stream | public void writeTo(OutputStream outputStream) throws IOException {
InputStream inputStream = content.get();
if (bufferedContent != null) {
for (ByteBuffer bb : bufferedContent) {
ImplUtils.writeByteBufferToStream(bb, outputStream);
}
} else {
byte[] buffer = new byte[STREAM_READ_SIZE];
int read;
while ((read = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, read);
}
}
} | byte[] buffer = new byte[STREAM_READ_SIZE]; | public void writeTo(OutputStream outputStream) throws IOException {
writeTo(Channels.newChannel(outputStream));
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private static final int INITIAL_BUFFER_CHUNK_SIZE = 8 * 1024;
private static final int MAX_BUFFER_CHUNK_SIZE = 8 * 1024 * 1024;
private static final int MAX_ARRAY_LENGTH = Integer.MAX_VALUE - 8;
private final Supplier<InputStream> content;
private final Long length;
private final boolean isReplayable;
private final List<ByteBuffer> bufferedContent;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<InputStreamContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(InputStreamContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @param length The length of the content.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream, Long length) {
Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
this.length = length;
this.isReplayable = canMarkReset(inputStream, length);
if (this.isReplayable) {
inputStream.mark(length.intValue());
this.content = () -> resettableContent(inputStream);
} else {
this.content = () -> inputStream;
}
this.bufferedContent = null;
}
private InputStreamContent(Supplier<InputStream> inputStreamSupplier, Long length,
List<ByteBuffer> bufferedContent) {
this.content = Objects.requireNonNull(inputStreamSupplier, "'inputStreamSupplier' cannot be null.");
this.length = length;
this.isReplayable = true;
this.bufferedContent = bufferedContent;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content.get();
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
if (bufferedContent != null) {
return Flux.fromIterable(bufferedContent).map(ByteBuffer::asReadOnlyBuffer);
} else {
return FluxUtil.toFluxByteBuffer(this.content.get(), STREAM_READ_SIZE);
}
}
@Override
@Override
public void writeTo(WritableByteChannel channel) throws IOException {
InputStream inputStream = content.get();
if (bufferedContent != null) {
for (ByteBuffer bb : bufferedContent) {
bb = bb.duplicate();
while (bb.hasRemaining()) {
channel.write(bb);
}
}
} else {
byte[] buffer = new byte[STREAM_READ_SIZE];
int read;
while ((read = inputStream.read(buffer)) != -1) {
ByteBuffer bb = ByteBuffer.wrap(buffer, 0, read);
while (bb.hasRemaining()) {
channel.write(bb);
}
}
}
}
@Override
public boolean isReplayable() {
    return isReplayable;
}
@Override
public BinaryDataContent toReplayableContent() {
    if (isReplayable) {
        return this;
    }
    // One-shot stream: drain it into in-memory buffers so it can be replayed later.
    return readAndBuffer(this.content.get(), length);
}
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
    if (isReplayable) {
        return Mono.just(this);
    }
    InputStream inputStream = this.content.get();
    // The blocking stream drain is shifted onto boundedElastic to keep reactive threads free.
    return Mono.just(inputStream)
        .publishOn(Schedulers.boundedElastic())
        .map(is -> readAndBuffer(is, length));
}
@Override
public BinaryDataContentType getContentType() {
    return BinaryDataContentType.BINARY;
}
// A stream is replayable in place only when its length is known, fits in an int
// (InputStream.mark takes an int read limit), and the stream supports mark/reset.
private static boolean canMarkReset(InputStream inputStream, Long length) {
    return length != null && length < MAX_ARRAY_LENGTH && inputStream.markSupported();
}
// Rewinds a mark-supporting stream to its previously set mark before handing it out.
private static InputStream resettableContent(InputStream stream) {
    try {
        stream.reset();
        return stream;
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
// Drains the stream into a list of ByteBuffers and wraps them in a replayable
// InputStreamContent. When the expected length was absent or does not match what
// was actually read, the measured length is reported instead.
private static InputStreamContent readAndBuffer(InputStream inputStream, Long length) {
    try {
        Tuple2<Long, List<ByteBuffer>> streamRead = StreamUtil.readStreamToListOfByteBuffers(
            inputStream, length, INITIAL_BUFFER_CHUNK_SIZE, MAX_BUFFER_CHUNK_SIZE);
        long readLength = streamRead.getT1();
        List<ByteBuffer> byteBuffers = streamRead.getT2();
        if (length == null || length != readLength) {
            // Trust what was actually read over the caller-supplied length.
            return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), readLength,
                byteBuffers);
        } else {
            return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), length,
                byteBuffers);
        }
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
/**
 * Fully drains the backing stream into memory and returns the resulting bytes.
 * <p>
 * Fix: the presize condition was inverted ({@code length < MAX_ARRAY_LENGTH} chose the
 * unsized buffer), which skipped presizing for every feasible length and, for lengths at or
 * above {@code MAX_ARRAY_LENGTH}, passed a truncated {@code intValue()} to the sized
 * constructor. Presize only when the length is known AND fits in a byte array.
 *
 * @return The full content of the stream as a byte array.
 * @throws UncheckedIOException If reading the stream fails (wrapped and logged).
 */
private byte[] getBytes() {
    try {
        AccessibleByteArrayOutputStream dataOutputBuffer = (length == null || length > MAX_ARRAY_LENGTH)
            ? new AccessibleByteArrayOutputStream() : new AccessibleByteArrayOutputStream(length.intValue());
        int nRead;
        byte[] data = new byte[STREAM_READ_SIZE];
        InputStream inputStream = this.content.get();
        while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
            dataOutputBuffer.write(data, 0, nRead);
        }
        return dataOutputBuffer.toByteArray();
    } catch (IOException ex) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
    }
}
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
// Buffer chunks grow from 8 KB up to 8 MB while draining a stream into memory.
private static final int INITIAL_BUFFER_CHUNK_SIZE = 8 * 1024;
private static final int MAX_BUFFER_CHUNK_SIZE = 8 * 1024 * 1024;
// Largest array size safely allocatable on common JVMs.
private static final int MAX_ARRAY_LENGTH = Integer.MAX_VALUE - 8;
// Supplies the stream; for replayable content each call yields a reset or fresh view.
private final Supplier<InputStream> content;
// Expected content length in bytes, or null when unknown.
private final Long length;
private final boolean isReplayable;
// Non-null only when the stream has been drained into memory (buffered replay path).
private final List<ByteBuffer> bufferedContent;
// Lazily cached full byte representation; BYTES_UPDATER ensures a single drain.
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<InputStreamContent, byte[]> BYTES_UPDATER
    = AtomicReferenceFieldUpdater.newUpdater(InputStreamContent.class, byte[].class, "bytes");
/**
 * Creates an instance of {@link InputStreamContent}.
 *
 * @param inputStream The inputStream that is used as the content for this instance.
 * @param length The length of the content, may be null when unknown.
 * @throws NullPointerException if {@code inputStream} is null.
 */
public InputStreamContent(InputStream inputStream, Long length) {
    Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
    this.length = length;
    this.isReplayable = canMarkReset(inputStream, length);
    if (this.isReplayable) {
        // canMarkReset guarantees length is non-null and fits in an int here.
        inputStream.mark(length.intValue());
        this.content = () -> resettableContent(inputStream);
    } else {
        this.content = () -> inputStream;
    }
    this.bufferedContent = null;
}
// Used by readAndBuffer: the content is already drained, hence always replayable.
private InputStreamContent(Supplier<InputStream> inputStreamSupplier, Long length,
    List<ByteBuffer> bufferedContent) {
    this.content = Objects.requireNonNull(inputStreamSupplier, "'inputStreamSupplier' cannot be null.");
    this.length = length;
    this.isReplayable = true;
    this.bufferedContent = bufferedContent;
}
@Override
public Long getLength() {
    byte[] data = BYTES_UPDATER.get(this);
    if (data != null) {
        // Once fully read, report the exact materialized size.
        return (long) data.length;
    }
    return length;
}
@Override
// Decodes the full content as UTF-8 text; forces a full drain of the stream via toBytes().
public String toString() {
    return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
    // Atomically caches the drained bytes so the one-shot stream is only consumed once.
    return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Deserializes from the (cached) byte representation of the stream.
    return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
    // Supplier yields a fresh or reset view of the content when replayable.
    return this.content.get();
}
@Override
public ByteBuffer toByteBuffer() {
    // Read-only view so callers cannot mutate the cached byte array.
    return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
    // Prefer the pre-buffered chunks when present — avoids re-reading the stream.
    if (bufferedContent != null) {
        return Flux.fromIterable(bufferedContent).map(ByteBuffer::asReadOnlyBuffer);
    } else {
        return FluxUtil.toFluxByteBuffer(this.content.get(), STREAM_READ_SIZE);
    }
}
/**
 * Writes this content into the given {@link WritableByteChannel}.
 * <p>
 * Fix: the original declared {@code @Override} twice — {@code java.lang.Override} is not
 * {@code @Repeatable}, so the duplicate annotation is a compile error. Also defers
 * {@code content.get()} to the streaming branch so the buffered path does no needless work.
 *
 * @param channel The channel to write the content to.
 * @throws IOException If an I/O error occurs while transferring the content.
 */
@Override
public void writeTo(WritableByteChannel channel) throws IOException {
    if (bufferedContent != null) {
        // Replay path: write via duplicates so the shared buffers' positions stay untouched.
        for (ByteBuffer bb : bufferedContent) {
            ImplUtils.fullyWriteBuffer(bb.duplicate(), channel);
        }
    } else {
        // Streaming path: only materialize the stream when it is actually consumed.
        IOUtils.transfer(Channels.newChannel(content.get()), channel, length);
    }
}
@Override
public Mono<Void> writeTo(AsynchronousByteChannel channel) {
    if (channel == null) {
        return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
    }
    // Streams the content reactively into the asynchronous channel.
    return FluxUtil.writeToAsynchronousByteChannel(toFluxByteBuffer(), channel);
}
@Override
public boolean isReplayable() {
    return isReplayable;
}
@Override
public BinaryDataContent toReplayableContent() {
    if (isReplayable) {
        return this;
    }
    // One-shot stream: drain it into in-memory buffers so it can be replayed later.
    return readAndBuffer(this.content.get(), length);
}
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
    if (isReplayable) {
        return Mono.just(this);
    }
    InputStream inputStream = this.content.get();
    // The blocking stream drain is shifted onto boundedElastic to keep reactive threads free.
    return Mono.just(inputStream)
        .publishOn(Schedulers.boundedElastic())
        .map(is -> readAndBuffer(is, length));
}
@Override
public BinaryDataContentType getContentType() {
    return BinaryDataContentType.BINARY;
}
// A stream is replayable in place only when its length is known, fits in an int
// (InputStream.mark takes an int read limit), and the stream supports mark/reset.
private static boolean canMarkReset(InputStream inputStream, Long length) {
    return length != null && length < MAX_ARRAY_LENGTH && inputStream.markSupported();
}
// Rewinds a mark-supporting stream to the mark set at construction time.
private static InputStream resettableContent(InputStream stream) {
    try {
        stream.reset();
        return stream;
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
// Drains the stream into a list of ByteBuffers and wraps them in a replayable
// InputStreamContent. When the expected length was absent or does not match what
// was actually read, the measured length is reported instead.
private static InputStreamContent readAndBuffer(InputStream inputStream, Long length) {
    try {
        Tuple2<Long, List<ByteBuffer>> streamRead = StreamUtil.readStreamToListOfByteBuffers(
            inputStream, length, INITIAL_BUFFER_CHUNK_SIZE, MAX_BUFFER_CHUNK_SIZE);
        long readLength = streamRead.getT1();
        List<ByteBuffer> byteBuffers = streamRead.getT2();
        if (length == null || length != readLength) {
            // Trust what was actually read over the caller-supplied length.
            return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), readLength,
                byteBuffers);
        } else {
            return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), length,
                byteBuffers);
        }
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
/**
 * Fully drains the backing stream into memory and returns the resulting bytes.
 * <p>
 * Fix: the presize condition was inverted ({@code length < MAX_ARRAY_LENGTH} chose the
 * unsized buffer), which skipped presizing for every feasible length and, for lengths at or
 * above {@code MAX_ARRAY_LENGTH}, passed a truncated {@code intValue()} to the sized
 * constructor. Presize only when the length is known AND fits in a byte array.
 *
 * @return The full content of the stream as a byte array.
 * @throws UncheckedIOException If reading the stream fails (wrapped and logged).
 */
private byte[] getBytes() {
    try {
        AccessibleByteArrayOutputStream dataOutputBuffer = (length == null || length > MAX_ARRAY_LENGTH)
            ? new AccessibleByteArrayOutputStream() : new AccessibleByteArrayOutputStream(length.intValue());
        int nRead;
        byte[] data = new byte[STREAM_READ_SIZE];
        InputStream inputStream = this.content.get();
        while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
            dataOutputBuffer.write(data, 0, nRead);
        }
        return dataOutputBuffer.toByteArray();
    } catch (IOException ex) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
    }
}
} |
I really like `writeTo` APIs! It seems we don't have much test coverage for them on binaryData/Content, mind adding some tests? | public void writeTo(OutputStream outputStream) throws IOException {
Objects.requireNonNull(outputStream, "'outputStream' cannot be null.");
content.writeTo(outputStream);
} | content.writeTo(outputStream); | public void writeTo(OutputStream outputStream) throws IOException {
Objects.requireNonNull(outputStream, "'outputStream' cannot be null.");
content.writeTo(outputStream);
} | class BinaryData {
private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
// Default JSON serializer used by the fromObject overloads that take no serializer.
static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);
// Largest array size safely allocatable on common JVMs.
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// The strategy object that owns the actual bytes/stream/Flux backing this BinaryData.
private final BinaryDataContent content;
BinaryData(BinaryDataContent content) {
    this.content = Objects.requireNonNull(content, "'content' cannot be null.");
}
static {
    // Registers an accessor so code outside this class can construct BinaryData
    // instances and reach the backing content through BinaryDataHelper.
    BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
        @Override
        public BinaryData createBinaryData(BinaryDataContent content) {
            return new BinaryData(content);
        }
        @Override
        public BinaryDataContent getContent(BinaryData binaryData) {
            return binaryData.content;
        }
    });
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
* inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream
* content is not cached if the stream is not read into a format that requires the content to be fully read into
* memory.
* <p>
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
* </p>
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStream
* <pre>
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
* BinaryData binaryData = BinaryData.fromStream&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStream
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static BinaryData fromStream(InputStream inputStream) {
    // A null length means the stream length is unknown.
    return fromStream(inputStream, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
* inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream
* content is not cached if the stream is not read into a format that requires the content to be fully read into
* memory.
* <p>
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
* </p>
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStream
* <pre>
* byte[] bytes = "Some Data".getBytes&
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
* BinaryData binaryData = BinaryData.fromStream&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStream
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static BinaryData fromStream(InputStream inputStream, Long length) {
    // InputStreamContent handles the null check and mark/reset-based replayability.
    return new BinaryData(new InputStreamContent(inputStream, length));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}.
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync
* <pre>
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromStreamAsync&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStreamAsync
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
    // A null length means the stream length is unknown.
    return fromStreamAsync(inputStream, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}.
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync
* <pre>
* byte[] bytes = "Some Data".getBytes&
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromStreamAsync&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStreamAsync
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) {
    // fromCallable defers creation (and its argument validation) until subscription.
    return Mono.fromCallable(() -> fromStream(inputStream, length));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
*
* <p><strong>Create an instance from a Flux of ByteBuffer</strong></p>
*
* <p>This method aggregates data into single byte array.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFlux
* <pre>
* final byte[] data = "Some Data".getBytes&
* final Flux<ByteBuffer> dataFlux = Flux.just&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromFlux&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFlux
*
* @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
* @throws NullPointerException If {@code data} is null.
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
    // A null length means the total byte count is unknown.
    return fromFlux(data, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
*
* <p><strong>Create an instance from a Flux of ByteBuffer</strong></p>
*
* <p>This method aggregates data into single byte array.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFlux
* <pre>
* final byte[] data = "Some Data".getBytes&
* final long length = data.length;
* final Flux<ByteBuffer> dataFlux = Flux.just&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromFlux&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFlux
*
* @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
* @throws IllegalArgumentException if the length is less than zero.
* @throws NullPointerException if {@code data} is null.
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    // Buffers the Flux eagerly (bufferContent = true).
    return fromFlux(data, length, true);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
* <p>
* If {@code bufferContent} is true and {@code length} is null the length of the returned {@link BinaryData} will be
* based on the length calculated by buffering. If {@code length} is non-null it will always be used as the
* {@link BinaryData} length even if buffering determines a different length.
*
* <p><strong>Create an instance from a Flux of ByteBuffer</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFlux
* <pre>
* final byte[] data = "Some Data".getBytes&
* final long length = data.length;
* final boolean shouldAggregateData = false;
* final Flux<ByteBuffer> dataFlux = Flux.just&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromFlux&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFlux
*
* @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption deferred.
* @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
* @throws IllegalArgumentException if the length is less than zero.
* @throws NullPointerException if {@code data} is null.
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'data' cannot be null."));
    }
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }
    if (!bufferContent) {
        // Deferred mode: wrap the Flux as-is; it is consumed when the BinaryData is used.
        return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
    }
    // Eager mode: deep-copy every buffer so later mutation of the originals cannot affect
    // this BinaryData, measuring the true byte count along the way.
    long[] trueLength = new long[]{0};
    return data.map(buffer -> {
        int bufferSize = buffer.remaining();
        ByteBuffer copy = ByteBuffer.allocate(bufferSize);
        trueLength[0] += bufferSize;
        copy.put(buffer);
        copy.flip();
        return copy;
    })
        .collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add)
        .map(buffers -> {
            // An explicitly supplied length wins over the measured one (see Javadoc above).
            return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers).map(ByteBuffer::duplicate),
                (length != null) ? length : trueLength[0], true));
        });
}
/**
* Creates an instance of {@link BinaryData} from the given {@link String}.
* <p>
* The {@link String} is converted into bytes using {@link String
* {@link StandardCharsets
* </p>
* <p><strong>Create an instance from a String</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromString
* <pre>
* final String data = "Some Data";
* &
* BinaryData binaryData = BinaryData.fromString&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromString
*
* @param data The {@link String} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link String}.
* @throws NullPointerException If {@code data} is null.
*/
public static BinaryData fromString(String data) {
    // Byte conversion uses UTF-8 (per the Javadoc contract above).
    return new BinaryData(new StringContent(data));
}
/**
* Creates an instance of {@link BinaryData} from the given byte array.
* <p>
* If the byte array is zero length an empty {@link BinaryData} will be returned. Note that the input byte array is
* used as a reference by this instance of {@link BinaryData} and any changes to the byte array outside of this
* instance will result in the contents of this BinaryData instance being updated as well. To safely update the byte
* array without impacting the BinaryData instance, perform an array copy first.
* </p>
*
* <p><strong>Create an instance from a byte array</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromBytes
* <pre>
* final byte[] data = "Some Data".getBytes&
* BinaryData binaryData = BinaryData.fromBytes&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromBytes
*
* @param data The byte array that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the byte array.
* @throws NullPointerException If {@code data} is null.
*/
public static BinaryData fromBytes(byte[] data) {
    // The array is held by reference — no defensive copy (documented above).
    return new BinaryData(new ByteArrayContent(data));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link ByteBuffer}.
* <p>
* If the {@link ByteBuffer} is zero length an empty {@link BinaryData} will be returned. Note that the input
* {@link ByteBuffer} is used as a reference by this instance of {@link BinaryData} and any changes to the
* {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance being updated
* as well. To safely update the {@link ByteBuffer} without impacting the BinaryData instance, perform an array copy
* first.
* </p>
*
* <p><strong>Create an instance from a ByteBuffer</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromByteBuffer
* <pre>
* final ByteBuffer data = ByteBuffer.wrap&
* BinaryData binaryData = BinaryData.fromByteBuffer&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromByteBuffer
*
* @param data The {@link ByteBuffer} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link ByteBuffer}.
* @throws NullPointerException If {@code data} is null.
*/
public static BinaryData fromByteBuffer(ByteBuffer data) {
    // The buffer is held by reference — no defensive copy (documented above).
    return new BinaryData(new ByteBufferContent(data));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link List} of {@link ByteBuffer}.
*
* <p>
* The input {@link ByteBuffer} instances are used as a reference by this instance of {@link BinaryData} and any
* changes to a {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance
* being updated as well. To safely update the byte array without impacting the BinaryData instance, perform an
* array copy first.
* </p>
*
* <p><strong>Create an instance from a List<ByteBuffer></strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromListByteBuffer
* <pre>
* final List<ByteBuffer> data = Stream.of&
* .map&
* .collect&
* BinaryData binaryData = BinaryData.fromListByteBuffer&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromListByteBuffer
*
* @param data The {@link List} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link List} of {@link ByteBuffer}.
*/
public static BinaryData fromListByteBuffer(List<ByteBuffer> data) {
    // Buffers are held by reference; mutating them later changes this BinaryData too.
    return new BinaryData(new ListByteBufferContent(data));
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
* {@link JsonSerializer}.
*
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to serialize the object.
* </p>
* <p><strong>Creating an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* BinaryData binaryData = BinaryData.fromObject&
*
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObject
*
* @param data The object that will be JSON serialized that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the JSON serialized object.
* @throws NullPointerException If {@code data} is null.
* @see JsonSerializer
*/
public static BinaryData fromObject(Object data) {
    // Uses the shared default JSON serializer.
    return fromObject(data, SERIALIZER);
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
* {@link JsonSerializer}.
*
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to serialize the object.
* </p>
* <p><strong>Creating an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* Disposable subscriber = BinaryData.fromObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObjectAsync
*
* @param data The object that will be JSON serialized that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
* @see JsonSerializer
*/
public static Mono<BinaryData> fromObjectAsync(Object data) {
    // Uses the shared default JSON serializer.
    return fromObjectAsync(data, SERIALIZER);
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
* {@link ObjectSerializer}.
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
* </p>
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Create an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObject
*
* @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
* determines how {@code null} data is serialized.
* @param serializer The {@link ObjectSerializer} used to serialize object.
* @return A {@link BinaryData} representing the serialized object.
* @throws NullPointerException If {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
    // Serialization is delegated to SerializableContent; how null 'data' is handled
    // is up to the supplied serializer (see Javadoc above).
    return new BinaryData(new SerializableContent(data, serializer));
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
* {@link ObjectSerializer}.
*
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
* </p>
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Create an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* final ObjectSerializer serializer = new MyJsonSerializer&
* Disposable subscriber = BinaryData.fromObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObjectAsync
*
* @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
* determines how {@code null} data is serialized.
* @param serializer The {@link ObjectSerializer} used to serialize object.
* @return A {@link Mono} of {@link BinaryData} representing the serialized object.
* @throws NullPointerException If {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
    // fromCallable defers serialization until subscription.
    return Mono.fromCallable(() -> fromObject(data, serializer));
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks
* for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is
* not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile -->
* <pre>
* BinaryData binaryData = BinaryData.fromFile&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile -->
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
*/
public static BinaryData fromFile(Path file) {
    // Uses the default chunk size for file reads.
    return fromFile(file, STREAM_READ_SIZE);
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
* checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
* however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile
* <pre>
* BinaryData binaryData = BinaryData.fromFile&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @param chunkSize The requested size for each read of the path.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
* @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
* {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
* @throws UncheckedIOException if the file does not exist.
*/
public static BinaryData fromFile(Path file, int chunkSize) {
    // Null position/length mean "read the whole file".
    return new BinaryData(new FileContent(file, chunkSize, null, null));
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
* checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
* however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile
* <pre>
* long position = 1024;
* long length = 100 * 1048;
* BinaryData binaryData = BinaryData.fromFile&
* new File&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @param position Position, or offset, within the path where reading begins.
* @param length Maximum number of bytes to be read from the path.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
* @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
* {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
* @throws UncheckedIOException if the file does not exist.
*/
public static BinaryData fromFile(Path file, Long position, Long length) {
    // Explicit file sub-range with the default chunk size.
    return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length));
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
* checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
* however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile
* <pre>
* long position = 1024;
* long length = 100 * 1048;
* int chunkSize = 8092;
* BinaryData binaryData = BinaryData.fromFile&
* new File&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @param position Position, or offset, within the path where reading begins.
* @param length Maximum number of bytes to be read from the path.
* @param chunkSize The requested size for each read of the path.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
* @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
* {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
* @throws UncheckedIOException if the file does not exist.
*/
public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) {
    // FileContent checks the file's existence and validates position/length/chunkSize at
    // construction time; the file itself is not read until the BinaryData is consumed.
    FileContent fileContent = new FileContent(file, chunkSize, position, length);
    return new BinaryData(fileContent);
}
/**
* Returns a byte array representation of this {@link BinaryData}.
* <p>
* This method returns a reference to the underlying byte array. Modifying the contents of the returned byte array
* may change the content of this BinaryData instance. If the content source of this BinaryData instance is a file,
* an {@link InputStream}, or a {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte
* array, it is recommended to make a copy of the contents first.
* <p>
* If the {@link BinaryData} is larger than the maximum size allowed for a {@code byte[]} this will throw an
* {@link IllegalStateException}.
*
* @return A byte array representing this {@link BinaryData}.
* @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
* {@code byte[]}.
*/
public byte[] toBytes() {
// Returns a reference to the backing content's byte array (no defensive copy) — callers
// that need to mutate should copy first, per the Javadoc above.
return content.toBytes();
}
/**
* Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8
* character set. A new instance of String is created each time this method is called.
* <p>
* If the {@link BinaryData} is larger than the maximum size allowed for a {@link String} this will throw an
* {@link IllegalStateException}.
*
* @return A {@link String} representing this {@link BinaryData}.
* @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
* {@link String}.
*/
@Override
public String toString() {
    // Decodes the content into a new String using UTF-8 on every invocation (see Javadoc above).
    return content.toString();
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param <T> Type of the deserialized Object.
* @param clazz The {@link Class} representing the Object's type.
* @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} is null.
* @see JsonSerializer
*/
public <T> T toObject(Class<T> clazz) {
// Wraps the Class in a TypeReference and delegates, using the shared default JSON serializer.
return toObject(TypeReference.createInstance(clazz), SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* &
* &
* &
* &
* &
*
*
* BinaryData binaryData = BinaryData.fromObject&
*
* List<Person> persons = binaryData.toObject&
* persons.forEach&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param <T> Type of the deserialized Object.
* @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} is null.
* @see JsonSerializer
*/
public <T> T toObject(TypeReference<T> typeReference) {
// Delegates using the shared default JSON serializer resolved at class load.
return toObject(typeReference, SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param clazz The {@link Class} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return An {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
// Wraps the Class in a TypeReference and delegates; null validation happens in the delegate.
return toObject(TypeReference.createInstance(clazz), serializer);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* &
* List<Person> persons = binaryData.toObject&
* persons.forEach&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return An {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Validate eagerly so a clear NullPointerException surfaces before any deserialization
    // work begins; arguments are checked left-to-right (typeReference first, then serializer).
    return content.toObject(
        Objects.requireNonNull(typeReference, "'typeReference' cannot be null."),
        Objects.requireNonNull(serializer, "'serializer' cannot be null."));
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param clazz The {@link Class} representing the Object's type.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} is null.
* @see JsonSerializer
*/
public <T> Mono<T> toObjectAsync(Class<T> clazz) {
// Async variant: wraps the Class in a TypeReference and delegates with the default JSON serializer.
return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} is null.
* @see JsonSerializer
*/
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
// Async variant using the shared default JSON serializer.
return toObjectAsync(typeReference, SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param clazz The {@link Class} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
// Wraps the Class in a TypeReference and delegates to the async TypeReference overload.
return toObjectAsync(TypeReference.createInstance(clazz), serializer);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData
* .toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData
* .toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
// Defers deserialization until subscription; null-argument failures from toObject surface
// as onError signals rather than being thrown at assembly time.
return Mono.fromCallable(() -> toObject(typeReference, serializer));
}
/**
* Returns an {@link InputStream} representation of this {@link BinaryData}.
*
* <p><strong>Get an InputStream from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toStream -->
* <pre>
* final byte[] data = "Some Data".getBytes&
* BinaryData binaryData = BinaryData.fromStream&
* final byte[] bytes = new byte[data.length];
* try &
* inputStream.read&
* System.out.println&
* &
* </pre>
* <!-- end com.azure.core.util.BinaryData.toStream -->
*
* @return An {@link InputStream} representing the {@link BinaryData}.
*/
public InputStream toStream() {
// Delegates to the backing content implementation.
return content.toStream();
}
/**
* Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
* <p>
* Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}.
*
* <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.util.BinaryData.toByteBuffer -->
* <pre>
* final byte[] data = "Some Data".getBytes&
* BinaryData binaryData = BinaryData.fromBytes&
* final byte[] bytes = new byte[data.length];
* binaryData.toByteBuffer&
* System.out.println&
* </pre>
* <!-- end com.azure.util.BinaryData.toByteBuffer -->
*
* @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
*/
public ByteBuffer toByteBuffer() {
// The returned buffer is read-only; mutation attempts throw ReadOnlyBufferException (see Javadoc above).
return content.toByteBuffer();
}
/**
* Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content
* is not read from the underlying data source until the {@link Flux} is subscribed to.
*
* @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
*/
public Flux<ByteBuffer> toFluxByteBuffer() {
// Lazy: the underlying data source is not read until the Flux is subscribed to.
return content.toFluxByteBuffer();
}
/**
* Writes the contents of this {@link BinaryData} to the given {@link OutputStream}.
* <p>
* This method does not close the {@link OutputStream}.
* <p>
* The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
* {@link
* again. If it needs to be read again, use {@link
*
* @param outputStream The {@link OutputStream} to write the contents of this {@link BinaryData} to.
* @throws NullPointerException If {@code outputStream} is null.
* @throws IOException If an I/O error occurs.
*/
/**
* Writes the contents of this {@link BinaryData} to the given {@link WritableByteChannel}.
* <p>
* This method does not close the {@link WritableByteChannel}.
* <p>
* The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
* {@link
* again. If it needs to be read again, use {@link
*
* @param channel The {@link WritableByteChannel} to write the contents of this {@link BinaryData} to.
* @throws NullPointerException If {@code channel} is null.
* @throws IOException If an I/O error occurs.
*/
public void writeTo(WritableByteChannel channel) throws IOException {
    // Fail fast with a descriptive NullPointerException before touching the content;
    // the channel is intentionally left open for the caller to close.
    content.writeTo(Objects.requireNonNull(channel, "'channel' cannot be null."));
}
/**
* Writes the contents of this {@link BinaryData} to the given {@link AsynchronousByteChannel}.
* <p>
* This method does not close the {@link AsynchronousByteChannel}.
* <p>
* The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
* {@link
* again. If it needs to be read again, use {@link
*
* @param channel The {@link AsynchronousByteChannel} to write the contents of this {@link BinaryData} to.
 * @return A {@link Mono} that completes once the content has been written or an error occurs while writing.
* @throws NullPointerException If {@code channel} is null.
* @throws IOException If an I/O error occurs.
*/
public Mono<Void> writeToAsync(AsynchronousByteChannel channel) throws IOException {
    // A null channel is surfaced as an error signal on the returned Mono rather than
    // being thrown synchronously; the channel is left open for the caller to close.
    return (channel == null)
        ? Mono.error(new NullPointerException("'channel' cannot be null."))
        : content.writeToAsync(channel);
}
/**
* Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify
* the length or the length cannot be determined without reading the whole content.
*
* @return the length of the content, if it is known.
*/
public Long getLength() {
// May be null when the source did not specify a length and it cannot be determined
// without reading the whole content.
return content.getLength();
}
/**
* Returns a flag indicating whether the content can be repeatedly consumed using all accessors including
* {@link
*
* <p>
* Replayability does not imply thread-safety. The caller must not use data accessors simultaneously regardless of
* what this method returns.
* </p>
*
* <!-- src_embed com.azure.util.BinaryData.replayability -->
* <pre>
* BinaryData binaryData = binaryDataProducer&
*
* if &
* binaryData = binaryData.toReplayableBinaryData&
* &
*
* streamConsumer&
* streamConsumer&
* </pre>
* <!-- end com.azure.util.BinaryData.replayability -->
*
* <!-- src_embed com.azure.util.BinaryData.replayabilityAsync -->
* <pre>
* Mono.fromCallable&
* .flatMap&
* if &
* return Mono.just&
* &
* return binaryData.toReplayableBinaryDataAsync&
* &
* &
* .flatMap&
* fluxConsumer&
* .then&
* .subscribe&
* </pre>
* <!-- end com.azure.util.BinaryData.replayabilityAsync -->
*
* @return a flag indicating whether the content can be repeatedly consumed using all accessors.
*/
public boolean isReplayable() {
// Replayable means all accessors may be invoked repeatedly; it does not imply thread-safety.
return content.isReplayable();
}
/**
* Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed
* repeatedly using all accessors including {@link
*
* <p>
* A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
* resetting a stream or buffering in memory are employed to assure replayability.
* </p>
*
* <p>
* Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData}
* simultaneously.
* </p>
*
* <!-- src_embed com.azure.util.BinaryData.replayability -->
* <pre>
* BinaryData binaryData = binaryDataProducer&
*
* if &
* binaryData = binaryData.toReplayableBinaryData&
* &
*
* streamConsumer&
* streamConsumer&
* </pre>
* <!-- end com.azure.util.BinaryData.replayability -->
*
* @return Replayable {@link BinaryData}.
*/
public BinaryData toReplayableBinaryData() {
    // Already-replayable data is returned as-is; otherwise wrap a replayable copy of the content.
    return isReplayable() ? this : new BinaryData(content.toReplayableContent());
}
/**
* Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed
* repeatedly using all accessors including {@link
*
* <p>
* A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
* resetting a stream or buffering in memory are employed to assure replayability.
* </p>
*
* <p>
* Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData}
* simultaneously.
* </p>
*
* <!-- src_embed com.azure.util.BinaryData.replayabilityAsync -->
* <pre>
* Mono.fromCallable&
* .flatMap&
* if &
* return Mono.just&
* &
* return binaryData.toReplayableBinaryDataAsync&
* &
* &
* .flatMap&
* fluxConsumer&
* .then&
* .subscribe&
* </pre>
* <!-- end com.azure.util.BinaryData.replayabilityAsync -->
*
* @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}.
*/
public Mono<BinaryData> toReplayableBinaryDataAsync() {
    // Already-replayable data is emitted immediately; otherwise a replayable copy of the
    // content is produced asynchronously and wrapped in a new BinaryData.
    return isReplayable()
        ? Mono.just(this)
        : content.toReplayableContentAsync().map(BinaryData::new);
}
} | class BinaryData {
// Logger for this class; reserved for diagnostics.
private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
// Default JSON serializer resolved from the classpath; per the method Javadoc below, a
// built-in Jackson-based implementation is used when no provider is found.
static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);
// Largest array size considered safe to allocate (Integer.MAX_VALUE minus header slack).
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// Backing content implementation that all accessors delegate to.
private final BinaryDataContent content;
/**
 * Creates a BinaryData wrapping the given content.
 *
 * @param content The backing content.
 * @throws NullPointerException If {@code content} is null.
 */
BinaryData(BinaryDataContent content) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
}
static {
// Registers an accessor so code elsewhere in the library can create BinaryData instances
// from a BinaryDataContent and read the backing content without widening the public API.
BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
@Override
public BinaryData createBinaryData(BinaryDataContent content) {
return new BinaryData(content);
}
@Override
public BinaryDataContent getContent(BinaryData binaryData) {
return binaryData.content;
}
});
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
* inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream
* content is not cached if the stream is not read into a format that requires the content to be fully read into
* memory.
* <p>
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
* </p>
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStream
* <pre>
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
* BinaryData binaryData = BinaryData.fromStream&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStream
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static BinaryData fromStream(InputStream inputStream) {
// Length unknown; delegates with a null length.
return fromStream(inputStream, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
* inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream
* content is not cached if the stream is not read into a format that requires the content to be fully read into
* memory.
* <p>
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
* </p>
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStream
* <pre>
* byte[] bytes = "Some Data".getBytes&
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
* BinaryData binaryData = BinaryData.fromStream&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStream
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static BinaryData fromStream(InputStream inputStream, Long length) {
// The stream is wrapped, not consumed, here; BinaryData never closes it (see Javadoc above).
return new BinaryData(new InputStreamContent(inputStream, length));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}.
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync
* <pre>
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromStreamAsync&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStreamAsync
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
// Async variant with unknown length.
return fromStreamAsync(inputStream, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}.
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync
* <pre>
* byte[] bytes = "Some Data".getBytes&
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromStreamAsync&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStreamAsync
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) {
// Defers creation (and its argument validation) until the Mono is subscribed to.
return Mono.fromCallable(() -> fromStream(inputStream, length));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 *
 * <p>This overload aggregates the data into a single byte array (the {@link Flux} is eagerly
 * buffered) and the length is computed from the buffered content.</p>
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
    // No explicit length; the buffering overload computes it from the consumed content.
    return fromFlux(data, null);
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 *
 * <p>This overload aggregates the data into a single byte array (the {@link Flux} is eagerly
 * buffered). If {@code length} is non-null it is used as the {@link BinaryData} length even if
 * buffering observes a different length.</p>
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws IllegalArgumentException if the length is less than zero.
 * @throws NullPointerException if {@code data} is null.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    // Buffer eagerly by default; use the three-argument overload to defer consumption.
    return fromFlux(data, length, true);
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 * <p>
 * If {@code bufferContent} is true and {@code length} is null the length of the returned
 * {@link BinaryData} will be based on the length calculated by buffering. If {@code length} is
 * non-null it will always be used as the {@link BinaryData} length even if buffering determines a
 * different length.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes.
 * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption deferred.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws IllegalArgumentException if the length is less than zero.
 * @throws NullPointerException if {@code data} is null.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'data' cannot be null."));
    }
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }

    if (!bufferContent) {
        // Deferred consumption: wrap the Flux as-is, trusting the caller-supplied length.
        return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
    }

    // Eager buffering: deep-copy every emitted buffer while tallying the observed byte count.
    // A single-element array is used so the lambda can mutate the running total.
    long[] bytesBuffered = new long[] { 0L };
    return data
        .map(source -> {
            int size = source.remaining();
            bytesBuffered[0] += size;

            ByteBuffer deepCopy = ByteBuffer.allocate(size);
            deepCopy.put(source);
            deepCopy.flip();
            return deepCopy;
        })
        .collectList()
        .map(copies -> new BinaryData(new FluxByteBufferContent(
            Flux.fromIterable(copies).map(ByteBuffer::duplicate),
            (length != null) ? length : bytesBuffered[0], true)));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link String}.
 * <p>
 * The {@link String} is converted into bytes using UTF-8
 * ({@link StandardCharsets#UTF_8}).
 * </p>
 *
 * @param data The {@link String} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link String}.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromString(String data) {
    return new BinaryData(new StringContent(data));
}
/**
 * Creates an instance of {@link BinaryData} from the given byte array.
 * <p>
 * If the byte array is zero length an empty {@link BinaryData} will be returned. Note that the input byte array is
 * used as a reference by this instance of {@link BinaryData} and any changes to the byte array outside of this
 * instance will result in the contents of this BinaryData instance being updated as well. To safely update the byte
 * array without impacting the BinaryData instance, perform an array copy first.
 * </p>
 *
 * @param data The byte array that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the byte array.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromBytes(byte[] data) {
    return new BinaryData(new ByteArrayContent(data));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link ByteBuffer}.
 * <p>
 * If the {@link ByteBuffer} is zero length an empty {@link BinaryData} will be returned. Note that the input
 * {@link ByteBuffer} is used as a reference by this instance of {@link BinaryData} and any changes to the
 * {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance being updated
 * as well. To safely update the {@link ByteBuffer} without impacting the BinaryData instance, perform an array copy
 * first.
 * </p>
 *
 * @param data The {@link ByteBuffer} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromByteBuffer(ByteBuffer data) {
    return new BinaryData(new ByteBufferContent(data));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link List} of {@link ByteBuffer}.
 *
 * <p>
 * The input {@link ByteBuffer} instances are used as a reference by this instance of {@link BinaryData} and any
 * changes to a {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance
 * being updated as well. To safely update the byte array without impacting the BinaryData instance, perform an
 * array copy first.
 * </p>
 *
 * @param data The {@link List} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link List} of {@link ByteBuffer}.
 */
public static BinaryData fromListByteBuffer(List<ByteBuffer> data) {
    return new BinaryData(new ListByteBufferContent(data));
}
/**
 * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
 * {@link JsonSerializer}.
 *
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to serialize the object.
 * </p>
 *
 * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the JSON serialized object.
 * @throws NullPointerException If {@code data} is null.
 * @see JsonSerializer
 */
public static BinaryData fromObject(Object data) {
    // Delegates to the serializer-accepting overload with the shared default serializer.
    return fromObject(data, SERIALIZER);
}
/**
 * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
 * {@link JsonSerializer}.
 *
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to serialize the object.
 * </p>
 *
 * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
 * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
 * @see JsonSerializer
 */
public static Mono<BinaryData> fromObjectAsync(Object data) {
    // Delegates to the serializer-accepting overload with the shared default serializer.
    return fromObjectAsync(data, SERIALIZER);
}
/**
 * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
 * {@link ObjectSerializer}.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 * </p>
 *
 * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
 * determines how {@code null} data is serialized.
 * @param serializer The {@link ObjectSerializer} used to serialize object.
 * @return A {@link BinaryData} representing the serialized object.
 * @throws NullPointerException If {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
    // Serialization is deferred: SerializableContent holds the object until the content is read.
    return new BinaryData(new SerializableContent(data, serializer));
}
/**
 * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
 * {@link ObjectSerializer}.
 *
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 * </p>
 *
 * @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
 * determines how {@code null} data is serialized.
 * @param serializer The {@link ObjectSerializer} used to serialize object.
 * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
 * @throws NullPointerException If {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
    // fromCallable defers the synchronous serialization to subscription time.
    return Mono.fromCallable(() -> fromObject(data, serializer));
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks
 * for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is
 * not read until there is an attempt to read the contents of the returned BinaryData instance.
 *
 * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p>
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 */
public static BinaryData fromFile(Path file) {
    return fromFile(file, STREAM_READ_SIZE);
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
 * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
 * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @param chunkSize The requested size for each read of the path.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to 0.
 * @throws UncheckedIOException if the file does not exist.
 */
public static BinaryData fromFile(Path file, int chunkSize) {
    // null position/length means the whole file is read.
    return new BinaryData(new FileContent(file, chunkSize, null, null));
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
 * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
 * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
 *
 * <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p>
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @param position Position, or offset, within the path where reading begins.
 * @param length Maximum number of bytes to be read from the path.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 * @throws IllegalArgumentException If {@code position} or {@code length} are negative or {@code position} plus
 * {@code length} is greater than the file size.
 * @throws UncheckedIOException if the file does not exist.
 */
public static BinaryData fromFile(Path file, Long position, Long length) {
    return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length));
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
 * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
 * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @param position Position, or offset, within the path where reading begins.
 * @param length Maximum number of bytes to be read from the path.
 * @param chunkSize The requested size for each read of the path.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 * @throws IllegalArgumentException If {@code position} or {@code length} are negative or {@code position} plus
 * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
 * @throws UncheckedIOException if the file does not exist.
 */
public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) {
    return new BinaryData(new FileContent(file, chunkSize, position, length));
}
/**
 * Returns a byte array representation of this {@link BinaryData}.
 * <p>
 * This method returns a reference to the underlying byte array. Modifying the contents of the returned byte array
 * may change the content of this BinaryData instance. If the content source of this BinaryData instance is a file,
 * an {@link InputStream}, or a {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte
 * array, it is recommended to make a copy of the contents first.
 * <p>
 * If the {@link BinaryData} is larger than the maximum size allowed for a {@code byte[]} this will throw an
 * {@link IllegalStateException}.
 *
 * @return A byte array representing this {@link BinaryData}.
 * @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
 * {@code byte[]}.
 */
public byte[] toBytes() {
    return content.toBytes();
}
/**
 * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8
 * character set. A new instance of String is created each time this method is called.
 * <p>
 * If the {@link BinaryData} is larger than the maximum size allowed for a {@link String} this will throw an
 * {@link IllegalStateException}.
 *
 * @return A {@link String} representing this {@link BinaryData}.
 * @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
 * {@link String}.
 */
@Override
public String toString() {
    return content.toString();
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param <T> Type of the deserialized Object.
 * @param clazz The {@link Class} representing the Object's type.
 * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} is null.
 * @see JsonSerializer
 */
public <T> T toObject(Class<T> clazz) {
    return toObject(TypeReference.createInstance(clazz), SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic use
 * {@link TypeReference#createInstance(Class)}.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} is null.
 * @see JsonSerializer
 */
public <T> T toObject(TypeReference<T> typeReference) {
    return toObject(typeReference, SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
    return toObject(TypeReference.createInstance(clazz), serializer);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic use
 * {@link TypeReference#createInstance(Class)}.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Arguments are validated inline; left-to-right evaluation checks typeReference first.
    return content.toObject(
        Objects.requireNonNull(typeReference, "'typeReference' cannot be null."),
        Objects.requireNonNull(serializer, "'serializer' cannot be null."));
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference} overload.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} is null.
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(Class<T> clazz) {
    return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic use
 * {@link TypeReference#createInstance(Class)}.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} is null.
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
    return toObjectAsync(typeReference, SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param clazz The {@link Class} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
    // Wrap the non-generic class in a TypeReference and reuse the generic overload,
    // which performs the actual deferred deserialization.
    TypeReference<T> typeReference = TypeReference.createInstance(clazz);
    return toObjectAsync(typeReference, serializer);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData
* .toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData
* .toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Defer the (potentially blocking) synchronous deserialization until the
    // returned Mono is subscribed.
    Mono<T> deferredDeserialization = Mono.fromCallable(() -> this.toObject(typeReference, serializer));
    return deferredDeserialization;
}
/**
* Returns an {@link InputStream} representation of this {@link BinaryData}.
*
* <p><strong>Get an InputStream from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toStream -->
* <pre>
* final byte[] data = "Some Data".getBytes&
* BinaryData binaryData = BinaryData.fromStream&
* final byte[] bytes = new byte[data.length];
* try &
* inputStream.read&
* System.out.println&
* &
* </pre>
* <!-- end com.azure.core.util.BinaryData.toStream -->
*
* @return An {@link InputStream} representing the {@link BinaryData}.
*/
public InputStream toStream() {
    // Delegate directly to the backing BinaryDataContent.
    return this.content.toStream();
}
/**
* Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
* <p>
* Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}.
*
* <p><strong>Get a read-only ByteBuffer from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.util.BinaryData.toByteBuffer -->
* <pre>
* final byte[] data = "Some Data".getBytes&
* BinaryData binaryData = BinaryData.fromBytes&
* final byte[] bytes = new byte[data.length];
* binaryData.toByteBuffer&
* System.out.println&
* </pre>
* <!-- end com.azure.util.BinaryData.toByteBuffer -->
*
* @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
*/
public ByteBuffer toByteBuffer() {
    // The backing content produces the read-only view; mutation attempts on the
    // result throw ReadOnlyBufferException as documented above.
    return this.content.toByteBuffer();
}
/**
* Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content
* is not read from the underlying data source until the {@link Flux} is subscribed to.
*
* @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
*/
public Flux<ByteBuffer> toFluxByteBuffer() {
    // Lazy: the underlying data source is not read until the Flux is subscribed to.
    return this.content.toFluxByteBuffer();
}
/**
* Writes the contents of this {@link BinaryData} to the given {@link OutputStream}.
* <p>
* This method does not close the {@link OutputStream}.
* <p>
* The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
* {@link
* again. If it needs to be read again, use {@link
*
* @param outputStream The {@link OutputStream} to write the contents of this {@link BinaryData} to.
* @throws NullPointerException If {@code outputStream} is null.
* @throws IOException If an I/O error occurs.
*/
/**
* Writes the contents of this {@link BinaryData} to the given {@link WritableByteChannel}.
* <p>
* This method does not close the {@link WritableByteChannel}.
* <p>
* The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
* {@link
* again. If it needs to be read again, use {@link
*
* @param channel The {@link WritableByteChannel} to write the contents of this {@link BinaryData} to.
* @throws NullPointerException If {@code channel} is null.
* @throws IOException If an I/O error occurs.
*/
public void writeTo(WritableByteChannel channel) throws IOException {
    // Validate eagerly, then stream the content to the channel without buffering.
    // requireNonNull returns its argument, so this is equivalent to the
    // check-then-call form.
    content.writeTo(Objects.requireNonNull(channel, "'channel' cannot be null."));
}
/**
* Writes the contents of this {@link BinaryData} to the given {@link AsynchronousByteChannel}.
* <p>
* This method does not close the {@link AsynchronousByteChannel}.
* <p>
* The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
* {@link
* again. If it needs to be read again, use {@link
*
* @param channel The {@link AsynchronousByteChannel} to write the contents of this {@link BinaryData} to.
* @return A {@link Mono} the completes once content has been written or had an error writing.
* @throws NullPointerException If {@code channel} is null.
*/
public Mono<Void> writeTo(AsynchronousByteChannel channel) {
    // Validate eagerly so a null channel fails fast with the documented
    // NullPointerException (see @throws above), consistent with the synchronous
    // writeTo(WritableByteChannel) overload, rather than surfacing the failure
    // only when the returned Mono is subscribed.
    Objects.requireNonNull(channel, "'channel' cannot be null.");
    return content.writeTo(channel);
}
/**
* Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify
* the length or the length cannot be determined without reading the whole content.
*
* @return the length of the content, if it is known.
*/
public Long getLength() {
// Null indicates the source did not specify a length and it cannot be
// determined without reading the whole content (per the method javadoc).
return content.getLength();
}
/**
* Returns a flag indicating whether the content can be repeatedly consumed using all accessors including
* {@link
*
* <p>
* Replayability does not imply thread-safety. The caller must not use data accessors simultaneously regardless of
* what this method returns.
* </p>
*
* <!-- src_embed com.azure.util.BinaryData.replayability -->
* <pre>
* BinaryData binaryData = binaryDataProducer&
*
* if &
* binaryData = binaryData.toReplayableBinaryData&
* &
*
* streamConsumer&
* streamConsumer&
* </pre>
* <!-- end com.azure.util.BinaryData.replayability -->
*
* <!-- src_embed com.azure.util.BinaryData.replayabilityAsync -->
* <pre>
* Mono.fromCallable&
* .flatMap&
* if &
* return Mono.just&
* &
* return binaryData.toReplayableBinaryDataAsync&
* &
* &
* .flatMap&
* fluxConsumer&
* .then&
* .subscribe&
* </pre>
* <!-- end com.azure.util.BinaryData.replayabilityAsync -->
*
* @return a flag indicating whether the content can be repeatedly consumed using all accessors.
*/
public boolean isReplayable() {
// Replayability is a property of the backing content. Note (per the javadoc
// above): replayability does not imply thread-safety.
return content.isReplayable();
}
/**
* Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed
* repeatedly using all accessors including {@link
*
* <p>
* A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
* resetting a stream or buffering in memory are employed to assure replayability.
* </p>
*
* <p>
* Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData}
* simultaneously.
* </p>
*
* <!-- src_embed com.azure.util.BinaryData.replayability -->
* <pre>
* BinaryData binaryData = binaryDataProducer&
*
* if &
* binaryData = binaryData.toReplayableBinaryData&
* &
*
* streamConsumer&
* streamConsumer&
* </pre>
* <!-- end com.azure.util.BinaryData.replayability -->
*
* @return Replayable {@link BinaryData}.
*/
public BinaryData toReplayableBinaryData() {
    // Data that is not yet replayable is wrapped in a buffered (replayable)
    // copy of the content; already-replayable data is returned unchanged.
    if (!isReplayable()) {
        return new BinaryData(content.toReplayableContent());
    }
    return this;
}
/**
* Converts the {@link BinaryData} into a {@link BinaryData} that is replayable, i.e. content can be consumed
* repeatedly using all accessors including {@link
*
* <p>
* A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
* resetting a stream or buffering in memory are employed to assure replayability.
* </p>
*
* <p>
* Replayability does not imply thread-safety. The caller must not use data accessors of returned {@link BinaryData}
* simultaneously.
* </p>
*
* <!-- src_embed com.azure.util.BinaryData.replayabilityAsync -->
* <pre>
* Mono.fromCallable&
* .flatMap&
* if &
* return Mono.just&
* &
* return binaryData.toReplayableBinaryDataAsync&
* &
* &
* .flatMap&
* fluxConsumer&
* .then&
* .subscribe&
* </pre>
* <!-- end com.azure.util.BinaryData.replayabilityAsync -->
*
* @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}.
*/
public Mono<BinaryData> toReplayableBinaryDataAsync() {
    // Short-circuit for already-replayable data; otherwise buffer the content
    // asynchronously and wrap the replayable result.
    return isReplayable()
        ? Mono.just(this)
        : content.toReplayableContentAsync().map(BinaryData::new);
}
} |
Using `!` will simplify the code and remove the need for `continue`. | public static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) {
final HttpHeaders httpHeaders = new HttpHeaders((int) (headers.map().size() / 0.75F));
for (Map.Entry<String, List<String>> kvp : headers.map().entrySet()) {
if (CoreUtils.isNullOrEmpty(kvp.getValue())) {
continue;
}
httpHeaders.set(kvp.getKey(), kvp.getValue());
}
return httpHeaders;
} | if (CoreUtils.isNullOrEmpty(kvp.getValue())) { | public static HttpHeaders fromJdkHttpHeaders(java.net.http.HttpHeaders headers) {
final HttpHeaders httpHeaders = new HttpHeaders((int) (headers.map().size() / 0.75F));
for (Map.Entry<String, List<String>> kvp : headers.map().entrySet()) {
if (!CoreUtils.isNullOrEmpty(kvp.getValue())) {
httpHeaders.set(kvp.getKey(), kvp.getValue());
}
}
return httpHeaders;
} | class JdkHttpUtils {
/**
* Converts the given JDK Http headers to azure-core Http header.
*
* @param headers the JDK Http headers
* @return the azure-core Http headers
*/
@SuppressWarnings("deprecation")
} | class JdkHttpUtils {
/**
* Converts the given JDK Http headers to azure-core Http header.
*
* @param headers the JDK Http headers
* @return the azure-core Http headers
*/
@SuppressWarnings("deprecation")
} |
Would prefer throwing `UnsupportedOperationException` here. | public void writeTo(BufferedSink bufferedSink) throws IOException {
if (!body.isReplayable() && !BODY_SENT_UPDATER.compareAndSet(this, 0, 1)) {
throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send body is not supported."));
} else {
body.writeTo(bufferedSink);
}
} | throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send body is not supported.")); | public void writeTo(BufferedSink bufferedSink) throws IOException {
if (!body.isReplayable() && !BODY_SENT_UPDATER.compareAndSet(this, 0, 1)) {
throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send body is not supported."));
} else {
body.writeTo(bufferedSink);
}
} | class BinaryDataRequestBody extends RequestBody {
private static final ClientLogger LOGGER = new ClientLogger(BinaryDataRequestBody.class);
private final MediaType contentType;
private final BinaryData body;
private final long effectiveContentLength;
private volatile int bodySent = 0;
private static final AtomicIntegerFieldUpdater<BinaryDataRequestBody> BODY_SENT_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(BinaryDataRequestBody.class, "bodySent");
/**
* Creates a new instance of the BinaryDataRequestBody class.
*
* @param body The {@link BinaryData} to use as the body.
* @param contentType The content type of the body.
* @param effectiveContentLength The length of the body.
*/
public BinaryDataRequestBody(BinaryData body, MediaType contentType, long effectiveContentLength) {
this.body = body;
this.contentType = contentType;
this.effectiveContentLength = effectiveContentLength;
}
@Override
public long contentLength() throws IOException {
return effectiveContentLength;
}
@Override
public boolean isOneShot() {
return !body.isReplayable();
}
@Override
public MediaType contentType() {
return contentType;
}
@Override
} | class BinaryDataRequestBody extends RequestBody {
private static final ClientLogger LOGGER = new ClientLogger(BinaryDataRequestBody.class);
private final MediaType contentType;
private final BinaryData body;
private final long effectiveContentLength;
private volatile int bodySent = 0;
private static final AtomicIntegerFieldUpdater<BinaryDataRequestBody> BODY_SENT_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(BinaryDataRequestBody.class, "bodySent");
/**
* Creates a new instance of the BinaryDataRequestBody class.
*
* @param body The {@link BinaryData} to use as the body.
* @param contentType The content type of the body.
* @param effectiveContentLength The length of the body.
*/
public BinaryDataRequestBody(BinaryData body, MediaType contentType, long effectiveContentLength) {
this.body = body;
this.contentType = contentType;
this.effectiveContentLength = effectiveContentLength;
}
@Override
public long contentLength() throws IOException {
return effectiveContentLength;
}
@Override
public boolean isOneShot() {
return !body.isReplayable();
}
@Override
public MediaType contentType() {
return contentType;
}
@Override
} |
Moved this to calling an API that does this check. | public void writeTo(OutputStream outputStream) throws IOException {
InputStream inputStream = content.get();
if (bufferedContent != null) {
for (ByteBuffer bb : bufferedContent) {
ImplUtils.writeByteBufferToStream(bb, outputStream);
}
} else {
byte[] buffer = new byte[STREAM_READ_SIZE];
int read;
while ((read = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, read);
}
}
} | byte[] buffer = new byte[STREAM_READ_SIZE]; | public void writeTo(OutputStream outputStream) throws IOException {
writeTo(Channels.newChannel(outputStream));
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private static final int INITIAL_BUFFER_CHUNK_SIZE = 8 * 1024;
private static final int MAX_BUFFER_CHUNK_SIZE = 8 * 1024 * 1024;
private static final int MAX_ARRAY_LENGTH = Integer.MAX_VALUE - 8;
private final Supplier<InputStream> content;
private final Long length;
private final boolean isReplayable;
private final List<ByteBuffer> bufferedContent;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<InputStreamContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(InputStreamContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @param length The length of the content.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream, Long length) {
Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
this.length = length;
this.isReplayable = canMarkReset(inputStream, length);
if (this.isReplayable) {
inputStream.mark(length.intValue());
this.content = () -> resettableContent(inputStream);
} else {
this.content = () -> inputStream;
}
this.bufferedContent = null;
}
private InputStreamContent(Supplier<InputStream> inputStreamSupplier, Long length,
List<ByteBuffer> bufferedContent) {
this.content = Objects.requireNonNull(inputStreamSupplier, "'inputStreamSupplier' cannot be null.");
this.length = length;
this.isReplayable = true;
this.bufferedContent = bufferedContent;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content.get();
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
if (bufferedContent != null) {
return Flux.fromIterable(bufferedContent).map(ByteBuffer::asReadOnlyBuffer);
} else {
return FluxUtil.toFluxByteBuffer(this.content.get(), STREAM_READ_SIZE);
}
}
@Override
@Override
public void writeTo(WritableByteChannel channel) throws IOException {
InputStream inputStream = content.get();
if (bufferedContent != null) {
for (ByteBuffer bb : bufferedContent) {
bb = bb.duplicate();
while (bb.hasRemaining()) {
channel.write(bb);
}
}
} else {
byte[] buffer = new byte[STREAM_READ_SIZE];
int read;
while ((read = inputStream.read(buffer)) != -1) {
ByteBuffer bb = ByteBuffer.wrap(buffer, 0, read);
while (bb.hasRemaining()) {
channel.write(bb);
}
}
}
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
return readAndBuffer(this.content.get(), length);
}
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
InputStream inputStream = this.content.get();
return Mono.just(inputStream)
.publishOn(Schedulers.boundedElastic())
.map(is -> readAndBuffer(is, length));
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private static boolean canMarkReset(InputStream inputStream, Long length) {
return length != null && length < MAX_ARRAY_LENGTH && inputStream.markSupported();
}
private static InputStream resettableContent(InputStream stream) {
try {
stream.reset();
return stream;
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
private static InputStreamContent readAndBuffer(InputStream inputStream, Long length) {
try {
Tuple2<Long, List<ByteBuffer>> streamRead = StreamUtil.readStreamToListOfByteBuffers(
inputStream, length, INITIAL_BUFFER_CHUNK_SIZE, MAX_BUFFER_CHUNK_SIZE);
long readLength = streamRead.getT1();
List<ByteBuffer> byteBuffers = streamRead.getT2();
if (length == null || length != readLength) {
return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), readLength,
byteBuffers);
} else {
return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), length,
byteBuffers);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
private byte[] getBytes() {
try {
AccessibleByteArrayOutputStream dataOutputBuffer = (length == null || length < MAX_ARRAY_LENGTH)
? new AccessibleByteArrayOutputStream() : new AccessibleByteArrayOutputStream(length.intValue());
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
InputStream inputStream = this.content.get();
while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} | class InputStreamContent extends BinaryDataContent {
private static final ClientLogger LOGGER = new ClientLogger(InputStreamContent.class);
private static final int INITIAL_BUFFER_CHUNK_SIZE = 8 * 1024;
private static final int MAX_BUFFER_CHUNK_SIZE = 8 * 1024 * 1024;
private static final int MAX_ARRAY_LENGTH = Integer.MAX_VALUE - 8;
private final Supplier<InputStream> content;
private final Long length;
private final boolean isReplayable;
private final List<ByteBuffer> bufferedContent;
private volatile byte[] bytes;
private static final AtomicReferenceFieldUpdater<InputStreamContent, byte[]> BYTES_UPDATER
= AtomicReferenceFieldUpdater.newUpdater(InputStreamContent.class, byte[].class, "bytes");
/**
* Creates an instance of {@link InputStreamContent}.
*
* @param inputStream The inputStream that is used as the content for this instance.
* @param length The length of the content.
* @throws NullPointerException if {@code content} is null.
*/
public InputStreamContent(InputStream inputStream, Long length) {
Objects.requireNonNull(inputStream, "'inputStream' cannot be null.");
this.length = length;
this.isReplayable = canMarkReset(inputStream, length);
if (this.isReplayable) {
inputStream.mark(length.intValue());
this.content = () -> resettableContent(inputStream);
} else {
this.content = () -> inputStream;
}
this.bufferedContent = null;
}
private InputStreamContent(Supplier<InputStream> inputStreamSupplier, Long length,
List<ByteBuffer> bufferedContent) {
this.content = Objects.requireNonNull(inputStreamSupplier, "'inputStreamSupplier' cannot be null.");
this.length = length;
this.isReplayable = true;
this.bufferedContent = bufferedContent;
}
@Override
public Long getLength() {
byte[] data = BYTES_UPDATER.get(this);
if (data != null) {
return (long) data.length;
}
return length;
}
@Override
public String toString() {
return new String(toBytes(), StandardCharsets.UTF_8);
}
@Override
public byte[] toBytes() {
return BYTES_UPDATER.updateAndGet(this, bytes -> bytes == null ? getBytes() : bytes);
}
@Override
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
return serializer.deserializeFromBytes(toBytes(), typeReference);
}
@Override
public InputStream toStream() {
return this.content.get();
}
@Override
public ByteBuffer toByteBuffer() {
return ByteBuffer.wrap(toBytes()).asReadOnlyBuffer();
}
@Override
public Flux<ByteBuffer> toFluxByteBuffer() {
if (bufferedContent != null) {
return Flux.fromIterable(bufferedContent).map(ByteBuffer::asReadOnlyBuffer);
} else {
return FluxUtil.toFluxByteBuffer(this.content.get(), STREAM_READ_SIZE);
}
}
@Override
@Override
public void writeTo(WritableByteChannel channel) throws IOException {
InputStream inputStream = content.get();
if (bufferedContent != null) {
for (ByteBuffer bb : bufferedContent) {
ImplUtils.fullyWriteBuffer(bb.duplicate(), channel);
}
} else {
IOUtils.transfer(Channels.newChannel(inputStream), channel, length);
}
}
@Override
public Mono<Void> writeTo(AsynchronousByteChannel channel) {
if (channel == null) {
return monoError(LOGGER, new NullPointerException("'channel' cannot be null."));
}
return FluxUtil.writeToAsynchronousByteChannel(toFluxByteBuffer(), channel);
}
@Override
public boolean isReplayable() {
return isReplayable;
}
@Override
public BinaryDataContent toReplayableContent() {
if (isReplayable) {
return this;
}
return readAndBuffer(this.content.get(), length);
}
@Override
public Mono<BinaryDataContent> toReplayableContentAsync() {
if (isReplayable) {
return Mono.just(this);
}
InputStream inputStream = this.content.get();
return Mono.just(inputStream)
.publishOn(Schedulers.boundedElastic())
.map(is -> readAndBuffer(is, length));
}
@Override
public BinaryDataContentType getContentType() {
return BinaryDataContentType.BINARY;
}
private static boolean canMarkReset(InputStream inputStream, Long length) {
return length != null && length < MAX_ARRAY_LENGTH && inputStream.markSupported();
}
private static InputStream resettableContent(InputStream stream) {
try {
stream.reset();
return stream;
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
private static InputStreamContent readAndBuffer(InputStream inputStream, Long length) {
try {
Tuple2<Long, List<ByteBuffer>> streamRead = StreamUtil.readStreamToListOfByteBuffers(
inputStream, length, INITIAL_BUFFER_CHUNK_SIZE, MAX_BUFFER_CHUNK_SIZE);
long readLength = streamRead.getT1();
List<ByteBuffer> byteBuffers = streamRead.getT2();
if (length == null || length != readLength) {
return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), readLength,
byteBuffers);
} else {
return new InputStreamContent(() -> new IterableOfByteBuffersInputStream(byteBuffers), length,
byteBuffers);
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
private byte[] getBytes() {
try {
AccessibleByteArrayOutputStream dataOutputBuffer = (length == null || length < MAX_ARRAY_LENGTH)
? new AccessibleByteArrayOutputStream() : new AccessibleByteArrayOutputStream(length.intValue());
int nRead;
byte[] data = new byte[STREAM_READ_SIZE];
InputStream inputStream = this.content.get();
while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
dataOutputBuffer.write(data, 0, nRead);
}
return dataOutputBuffer.toByteArray();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
} |
Moved most changes into shared code paths with existing tests | public void writeTo(OutputStream outputStream) throws IOException {
Objects.requireNonNull(outputStream, "'outputStream' cannot be null.");
content.writeTo(outputStream);
} | content.writeTo(outputStream); | public void writeTo(OutputStream outputStream) throws IOException {
Objects.requireNonNull(outputStream, "'outputStream' cannot be null.");
content.writeTo(outputStream);
} | class BinaryData {
private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
private final BinaryDataContent content;
BinaryData(BinaryDataContent content) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
}
static {
BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
@Override
public BinaryData createBinaryData(BinaryDataContent content) {
return new BinaryData(content);
}
@Override
public BinaryDataContent getContent(BinaryData binaryData) {
return binaryData.content;
}
});
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
* inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream
* content is not cached if the stream is not read into a format that requires the content to be fully read into
* memory.
* <p>
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
* </p>
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStream
* <pre>
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
* BinaryData binaryData = BinaryData.fromStream&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStream
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static BinaryData fromStream(InputStream inputStream) {
return fromStream(inputStream, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
* inputStream, the BinaryData instance created may or may not allow reading the content more than once. The stream
* content is not cached if the stream is not read into a format that requires the content to be fully read into
* memory.
* <p>
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
* </p>
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStream
* <pre>
* byte[] bytes = "Some Data".getBytes&
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
* BinaryData binaryData = BinaryData.fromStream&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStream
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static BinaryData fromStream(InputStream inputStream, Long length) {
return new BinaryData(new InputStreamContent(inputStream, length));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}.
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync
* <pre>
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromStreamAsync&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStreamAsync
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
return fromStreamAsync(inputStream, null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link InputStream}.
* <b>NOTE:</b> The {@link InputStream} is not closed by this function.
*
* <p><strong>Create an instance from an InputStream</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromStreamAsync
* <pre>
* byte[] bytes = "Some Data".getBytes&
* final ByteArrayInputStream inputStream = new ByteArrayInputStream&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromStreamAsync&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromStreamAsync
*
* @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
* @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
* @throws NullPointerException If {@code inputStream} is null.
*/
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) {
return Mono.fromCallable(() -> fromStream(inputStream, length));
}
/**
* Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
*
* <p><strong>Create an instance from a Flux of ByteBuffer</strong></p>
*
* <p>This method aggregates data into single byte array.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFlux
* <pre>
* final byte[] data = "Some Data".getBytes&
* final Flux<ByteBuffer> dataFlux = Flux.just&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromFlux&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFlux
*
* @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
* @throws NullPointerException If {@code data} is null.
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
    // Length is unknown up front; the length-aware overload computes it while buffering.
    return fromFlux(data, (Long) null);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
*
* <p><strong>Create an instance from a Flux of ByteBuffer</strong></p>
*
* <p>This method aggregates data into single byte array.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFlux
* <pre>
* final byte[] data = "Some Data".getBytes&
* final long length = data.length;
* final Flux<ByteBuffer> dataFlux = Flux.just&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromFlux&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFlux
*
* @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
* @throws IllegalArgumentException if the length is less than zero.
* @throws NullPointerException if {@code data} is null.
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
    // This overload always buffers the Flux content eagerly.
    final boolean bufferContent = true;
    return fromFlux(data, length, bufferContent);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
* <p>
* If {@code bufferContent} is true and {@code length} is null the length of the returned {@link BinaryData} will be
* based on the length calculated by buffering. If {@code length} is non-null it will always be used as the
* {@link BinaryData} length even if buffering determines a different length.
*
* <p><strong>Create an instance from a Flux of ByteBuffer</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFlux
* <pre>
* final byte[] data = "Some Data".getBytes&
* final long length = data.length;
* final boolean shouldAggregateData = false;
* final Flux<ByteBuffer> dataFlux = Flux.just&
*
* Mono<BinaryData> binaryDataMono = BinaryData.fromFlux&
*
* Disposable subscriber = binaryDataMono
* .map&
* System.out.println&
* return true;
* &
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFlux
*
* @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @param length The length of {@code data} in bytes.
* @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption deferred.
* @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
* @throws IllegalArgumentException if the length is less than zero.
* @throws NullPointerException if {@code data} is null.
*/
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) {
    if (data == null) {
        return monoError(LOGGER, new NullPointerException("'data' cannot be null."));
    }
    if (length != null && length < 0) {
        return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
    }
    if (!bufferContent) {
        // Deferred consumption: wrap the Flux directly without draining it.
        return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
    }
    // Running total of bytes seen while buffering; a single-element array lets the
    // lambda below mutate it. NOTE(review): this assumes a single subscription to the
    // returned Mono's pipeline — re-subscribing would double-count trueLength; confirm
    // callers never reuse the source in a way that re-runs the map stage.
    long[] trueLength = new long[]{0};
    return data.map(buffer -> {
        // Deep-copy each buffer so later mutation of the source buffers cannot
        // alter the aggregated content.
        int bufferSize = buffer.remaining();
        ByteBuffer copy = ByteBuffer.allocate(bufferSize);
        trueLength[0] += bufferSize;
        copy.put(buffer);
        copy.flip();
        return copy;
    })
    // Collect the copies in arrival order.
    .collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add)
    .map(buffers -> {
        // A caller-supplied length always wins over the computed length, per the
        // documented contract; duplicate() keeps the cached buffers replayable.
        return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers).map(ByteBuffer::duplicate),
            (length != null) ? length : trueLength[0], true));
    });
}
/**
* Creates an instance of {@link BinaryData} from the given {@link String}.
* <p>
* The {@link String} is converted into bytes using {@link String
* {@link StandardCharsets
* </p>
* <p><strong>Create an instance from a String</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromString
* <pre>
* final String data = "Some Data";
* &
* BinaryData binaryData = BinaryData.fromString&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromString
*
* @param data The {@link String} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link String}.
* @throws NullPointerException If {@code data} is null.
*/
public static BinaryData fromString(String data) {
    // Wrap the String; conversion to UTF-8 bytes is handled by the content wrapper.
    final StringContent stringContent = new StringContent(data);
    return new BinaryData(stringContent);
}
/**
* Creates an instance of {@link BinaryData} from the given byte array.
* <p>
* If the byte array is zero length an empty {@link BinaryData} will be returned. Note that the input byte array is
* used as a reference by this instance of {@link BinaryData} and any changes to the byte array outside of this
* instance will result in the contents of this BinaryData instance being updated as well. To safely update the byte
* array without impacting the BinaryData instance, perform an array copy first.
* </p>
*
* <p><strong>Create an instance from a byte array</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromBytes
* <pre>
* final byte[] data = "Some Data".getBytes&
* BinaryData binaryData = BinaryData.fromBytes&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromBytes
*
* @param data The byte array that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the byte array.
* @throws NullPointerException If {@code data} is null.
*/
public static BinaryData fromBytes(byte[] data) {
    // The array is held by reference (not copied); external mutation is visible here.
    final ByteArrayContent byteArrayContent = new ByteArrayContent(data);
    return new BinaryData(byteArrayContent);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link ByteBuffer}.
* <p>
* If the {@link ByteBuffer} is zero length an empty {@link BinaryData} will be returned. Note that the input
* {@link ByteBuffer} is used as a reference by this instance of {@link BinaryData} and any changes to the
* {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance being updated
* as well. To safely update the {@link ByteBuffer} without impacting the BinaryData instance, perform an array copy
* first.
* </p>
*
* <p><strong>Create an instance from a ByteBuffer</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromByteBuffer
* <pre>
* final ByteBuffer data = ByteBuffer.wrap&
* BinaryData binaryData = BinaryData.fromByteBuffer&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromByteBuffer
*
* @param data The {@link ByteBuffer} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link ByteBuffer}.
* @throws NullPointerException If {@code data} is null.
*/
public static BinaryData fromByteBuffer(ByteBuffer data) {
    // The buffer is held by reference (not copied); external mutation is visible here.
    final ByteBufferContent byteBufferContent = new ByteBufferContent(data);
    return new BinaryData(byteBufferContent);
}
/**
* Creates an instance of {@link BinaryData} from the given {@link List} of {@link ByteBuffer}.
*
* <p>
* The input {@link ByteBuffer} instances are used as a reference by this instance of {@link BinaryData} and any
* changes to a {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance
* being updated as well. To safely update the byte array without impacting the BinaryData instance, perform an
* array copy first.
* </p>
*
 * <p><strong>Create an instance from a {@code List<ByteBuffer>}</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromListByteBuffer
* <pre>
* final List<ByteBuffer> data = Stream.of&
* .map&
* .collect&
* BinaryData binaryData = BinaryData.fromListByteBuffer&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromListByteBuffer
*
* @param data The {@link List} of {@link ByteBuffer} that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the {@link List} of {@link ByteBuffer}.
*/
public static BinaryData fromListByteBuffer(List<ByteBuffer> data) {
    // The buffers are held by reference (not copied); external mutation is visible here.
    final ListByteBufferContent listByteBufferContent = new ListByteBufferContent(data);
    return new BinaryData(listByteBufferContent);
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
* {@link JsonSerializer}.
*
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to serialize the object.
* </p>
* <p><strong>Creating an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* BinaryData binaryData = BinaryData.fromObject&
*
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObject
*
* @param data The object that will be JSON serialized that {@link BinaryData} will represent.
* @return A {@link BinaryData} representing the JSON serialized object.
* @throws NullPointerException If {@code data} is null.
* @see JsonSerializer
*/
public static BinaryData fromObject(Object data) {
    // Serialize with the shared default JSON serializer.
    return BinaryData.fromObject(data, SERIALIZER);
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
* {@link JsonSerializer}.
*
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to serialize the object.
* </p>
* <p><strong>Creating an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* Disposable subscriber = BinaryData.fromObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObjectAsync
*
* @param data The object that will be JSON serialized that {@link BinaryData} will represent.
* @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
* @see JsonSerializer
*/
public static Mono<BinaryData> fromObjectAsync(Object data) {
    // Serialize asynchronously with the shared default JSON serializer.
    return BinaryData.fromObjectAsync(data, SERIALIZER);
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
* {@link ObjectSerializer}.
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
* </p>
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Create an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObject
*
* @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
* determines how {@code null} data is serialized.
* @param serializer The {@link ObjectSerializer} used to serialize object.
* @return A {@link BinaryData} representing the serialized object.
* @throws NullPointerException If {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
    // Pair the object with the caller's serializer; SerializableContent performs
    // the actual conversion.
    final SerializableContent serializableContent = new SerializableContent(data, serializer);
    return new BinaryData(serializableContent);
}
/**
* Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
* {@link ObjectSerializer}.
*
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
* </p>
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Create an instance from an Object</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* final ObjectSerializer serializer = new MyJsonSerializer&
* Disposable subscriber = BinaryData.fromObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromObjectAsync
*
* @param data The object that will be serialized that {@link BinaryData} will represent. The {@code serializer}
* determines how {@code null} data is serialized.
* @param serializer The {@link ObjectSerializer} used to serialize object.
* @return A {@link Mono} of {@link BinaryData} representing the serialized object.
* @throws NullPointerException If {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
    // Defer serialization until subscription time.
    return Mono.fromCallable(() -> BinaryData.fromObject(data, serializer));
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method checks
* for the existence of the file at the time of creating an instance of {@link BinaryData}. The file, however, is
* not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile -->
* <pre>
* BinaryData binaryData = BinaryData.fromFile&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile -->
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
*/
public static BinaryData fromFile(Path file) {
    // Use the default (8KB) chunk size for file reads.
    return BinaryData.fromFile(file, STREAM_READ_SIZE);
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
* checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
* however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile
* <pre>
* BinaryData binaryData = BinaryData.fromFile&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @param chunkSize The requested size for each read of the path.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
* @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
* {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
* @throws UncheckedIOException if the file does not exist.
*/
public static BinaryData fromFile(Path file, int chunkSize) {
    // No position/length window: null position and length read the whole file.
    final FileContent fileContent = new FileContent(file, chunkSize, null, null);
    return new BinaryData(fileContent);
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
* checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
* however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <p>The {@link BinaryData} returned from this method uses 8KB chunk size when reading file content.</p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile
* <pre>
* long position = 1024;
* long length = 100 * 1048;
* BinaryData binaryData = BinaryData.fromFile&
* new File&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @param position Position, or offset, within the path where reading begins.
* @param length Maximum number of bytes to be read from the path.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
* @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
* {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
* @throws UncheckedIOException if the file does not exist.
*/
public static BinaryData fromFile(Path file, Long position, Long length) {
    // Explicit read window with the default (8KB) chunk size.
    final FileContent fileContent = new FileContent(file, STREAM_READ_SIZE, position, length);
    return new BinaryData(fileContent);
}
/**
* Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
* checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
* however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
*
* <p><strong>Create an instance from a file</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.fromFile
* <pre>
* long position = 1024;
* long length = 100 * 1048;
* int chunkSize = 8092;
* BinaryData binaryData = BinaryData.fromFile&
* new File&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.fromFile
*
* @param file The {@link Path} that will be the {@link BinaryData} data.
* @param position Position, or offset, within the path where reading begins.
* @param length Maximum number of bytes to be read from the path.
* @param chunkSize The requested size for each read of the path.
* @return A new {@link BinaryData}.
* @throws NullPointerException If {@code file} is null.
* @throws IllegalArgumentException If {@code offset} or {@code length} are negative or {@code offset} plus
* {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
* @throws UncheckedIOException if the file does not exist.
*/
public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) {
    // Fully-specified variant: explicit read window and chunk size.
    final FileContent fileContent = new FileContent(file, chunkSize, position, length);
    return new BinaryData(fileContent);
}
/**
* Returns a byte array representation of this {@link BinaryData}.
* <p>
* This method returns a reference to the underlying byte array. Modifying the contents of the returned byte array
* may change the content of this BinaryData instance. If the content source of this BinaryData instance is a file,
* an {@link InputStream}, or a {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte
* array, it is recommended to make a copy of the contents first.
* <p>
* If the {@link BinaryData} is larger than the maximum size allowed for a {@code byte[]} this will throw an
* {@link IllegalStateException}.
*
* @return A byte array representing this {@link BinaryData}.
* @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
* {@code byte[]}.
*/
public byte[] toBytes() {
    // May return a reference to the underlying array rather than a copy; callers
    // that need to mutate safely should copy first (see method Javadoc).
    final byte[] bytes = content.toBytes();
    return bytes;
}
/**
* Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8
* character set. A new instance of String is created each time this method is called.
* <p>
* If the {@link BinaryData} is larger than the maximum size allowed for a {@link String} this will throw an
* {@link IllegalStateException}.
*
* @return A {@link String} representing this {@link BinaryData}.
* @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
* {@link String}.
*/
@Override
public String toString() {
    // UTF-8 decode of the content; a new String instance is created on every call.
    return content.toString();
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param <T> Type of the deserialized Object.
* @param clazz The {@link Class} representing the Object's type.
* @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} is null.
* @see JsonSerializer
*/
public <T> T toObject(Class<T> clazz) {
    // Adapt the non-generic class token to a TypeReference and use the default serializer.
    final TypeReference<T> typeReference = TypeReference.createInstance(clazz);
    return toObject(typeReference, SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* &
* &
* &
* &
* &
*
*
* BinaryData binaryData = BinaryData.fromObject&
*
* List<Person> persons = binaryData.toObject&
* persons.forEach&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param <T> Type of the deserialized Object.
* @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} is null.
* @see JsonSerializer
*/
public <T> T toObject(TypeReference<T> typeReference) {
    // Deserialize using the shared default JSON serializer.
    return this.toObject(typeReference, SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param clazz The {@link Class} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return An {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
    // Adapt the class token to a TypeReference and delegate to the core overload.
    final TypeReference<T> typeReference = TypeReference.createInstance(clazz);
    return toObject(typeReference, serializer);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
* {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
* own implementation.
*
* <p><strong>Azure SDK implementations</strong></p>
* <ul>
* <li><a href="https:
* <li><a href="https:
* </ul>
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person data = new Person&
*
* &
* &
* &
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* Person person = binaryData.toObject&
* System.out.println&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObject
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* final ObjectSerializer serializer = new MyJsonSerializer&
* BinaryData binaryData = BinaryData.fromObject&
*
* &
* List<Person> persons = binaryData.toObject&
* persons.forEach&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObject
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param serializer The {@link ObjectSerializer} used to deserialize object.
* @param <T> Type of the deserialized Object.
* @return An {@link Object} representing the deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
* @see ObjectSerializer
* @see JsonSerializer
* @see <a href="https:
*/
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Validate eagerly so callers get a clear NPE message; typeReference is checked
    // first, so its failure takes precedence when both arguments are null.
    Objects.requireNonNull(typeReference, "'typeReference' cannot be null.");
    Objects.requireNonNull(serializer, "'serializer' cannot be null.");
    // Each call deserializes afresh and returns a new instance (see method Javadoc).
    return content.toObject(typeReference, serializer);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link Class}, should be a non-generic class, for generic classes use
* {@link
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param clazz The {@link Class} representing the Object's type.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code clazz} is null.
* @see JsonSerializer
*/
public <T> Mono<T> toObjectAsync(Class<T> clazz) {
    // Adapt the class token to a TypeReference and use the default serializer.
    final TypeReference<T> typeReference = TypeReference.createInstance(clazz);
    return toObjectAsync(typeReference, SERIALIZER);
}
/**
* Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
* {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
* {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
* type is not recommended.
* <p>
* The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
* generic create a sub-type of {@link TypeReference}, if the type is non-generic use
* {@link TypeReference
* <p>
* <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
* implementation is found, a default Jackson-based implementation will be used to deserialize the object.
*
* <p><strong>Get a non-generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person data = new Person&
*
* &
* &
* &
* &
* &
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* <p><strong>Get a generic Object from the BinaryData</strong></p>
*
* <!-- src_embed com.azure.core.util.BinaryData.toObjectAsync
* <pre>
* final Person person1 = new Person&
* final Person person2 = new Person&
*
* List<Person> personList = new ArrayList<>&
* personList.add&
* personList.add&
*
* BinaryData binaryData = BinaryData.fromObject&
*
* Disposable subscriber = binaryData.toObjectAsync&
* .subscribe&
*
* &
* TimeUnit.SECONDS.sleep&
* subscriber.dispose&
* </pre>
* <!-- end com.azure.core.util.BinaryData.toObjectAsync
*
* @param typeReference The {@link TypeReference} representing the Object's type.
* @param <T> Type of the deserialized Object.
* @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
* @throws NullPointerException If {@code typeReference} is null.
* @see JsonSerializer
*/
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
    // Deserialize asynchronously using the shared default JSON serializer.
    return this.toObjectAsync(typeReference, SERIALIZER);
}
/**
 * Lazily deserializes this {@link BinaryData} into an object of the given non-generic type using the passed
 * {@link ObjectSerializer}. A new instance is produced on each subscription, so repeated conversion to the
 * same type is not recommended.
 * <p>
 * The type, represented by {@link Class}, must be non-generic; for generic types use
 * {@link #toObjectAsync(TypeReference, ObjectSerializer)}.
 * <p>
 * The serializer can be one of the Azure SDK implementations (e.g. the Jackson or GSON based
 * {@code JsonSerializer} providers) or your own {@link ObjectSerializer} implementation.
 *
 * @param clazz The {@link Class} representing the object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize the object.
 * @param <T> Type of the deserialized object.
 * @return A {@link Mono} emitting the deserialized object.
 * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
return toObjectAsync(TypeReference.createInstance(clazz), serializer);
}
/**
 * Lazily deserializes this {@link BinaryData} into an object of the given type using the passed
 * {@link ObjectSerializer}. A new instance is produced on each subscription, so repeated conversion to the
 * same type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can be generic or non-generic. For a generic type create a
 * sub-type of {@link TypeReference}; for a non-generic type use {@code TypeReference.createInstance(Class)}.
 * <p>
 * The serializer can be one of the Azure SDK implementations or your own {@link ObjectSerializer}
 * implementation.
 *
 * @param typeReference The {@link TypeReference} representing the object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize the object.
 * @param <T> Type of the deserialized object.
 * @return A {@link Mono} emitting the deserialized object.
 * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
// Defer the synchronous deserialization until subscription time.
return Mono.fromCallable(() -> toObject(typeReference, serializer));
}
/**
 * Returns an {@link InputStream} representation of this {@link BinaryData}.
 *
 * @return An {@link InputStream} representing the {@link BinaryData}.
 */
public InputStream toStream() {
return content.toStream();
}
/**
 * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
 * <p>
 * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}.
 *
 * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
 */
public ByteBuffer toByteBuffer() {
return content.toByteBuffer();
}
/**
 * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The
 * content is not read from the underlying data source until the {@link Flux} is subscribed to.
 *
 * @return The content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
 */
public Flux<ByteBuffer> toFluxByteBuffer() {
return content.toFluxByteBuffer();
}
/*
 * NOTE(review): the javadoc below documents a writeTo(OutputStream) overload whose body is not present in
 * this chunk — presumably elided by the extraction pipeline. Confirm against the original source.
 *
 * Writes the contents of this BinaryData to the given OutputStream without closing it. If the underlying
 * data source isn't replayable the contents may no longer be readable afterwards; use
 * toReplayableBinaryData() first if re-reading is needed.
 */
/**
 * Writes the contents of this {@link BinaryData} to the given {@link WritableByteChannel}.
 * <p>
 * This method does not close the {@link WritableByteChannel}.
 * <p>
 * The contents are written without buffering. If the underlying data source isn't replayable it may not be
 * readable again afterwards; if it needs to be read again, use {@link #toReplayableBinaryData()} first.
 *
 * @param channel The {@link WritableByteChannel} to write the contents of this {@link BinaryData} to.
 * @throws NullPointerException If {@code channel} is null.
 * @throws IOException If an I/O error occurs.
 */
public void writeTo(WritableByteChannel channel) throws IOException {
Objects.requireNonNull(channel, "'channel' cannot be null.");
content.writeTo(channel);
}
/**
 * Writes the contents of this {@link BinaryData} to the given {@link AsynchronousByteChannel}.
 * <p>
 * This method does not close the {@link AsynchronousByteChannel}.
 * <p>
 * The contents are written without buffering. If the underlying data source isn't replayable it may not be
 * readable again afterwards; if it needs to be read again, use {@link #toReplayableBinaryDataAsync()} first.
 *
 * @param channel The {@link AsynchronousByteChannel} to write the contents of this {@link BinaryData} to.
 * @return A {@link Mono} that completes once content has been written or had an error writing.
 * @throws NullPointerException If {@code channel} is null.
 * @throws IOException If an I/O error occurs.
 */
public Mono<Void> writeToAsync(AsynchronousByteChannel channel) throws IOException {
// Surface the null check as an error signal rather than a synchronous throw.
return (channel == null)
? Mono.error(new NullPointerException("'channel' cannot be null."))
: content.writeToAsync(channel);
}
/**
 * Returns the length of the content, if it is known. The length can be {@code null} if the source did not
 * specify the length or the length cannot be determined without reading the whole content.
 *
 * @return The length of the content, if it is known.
 */
public Long getLength() {
return content.getLength();
}
/**
 * Returns a flag indicating whether the content can be repeatedly consumed using all accessors, including
 * the stream- and flux-based ones.
 * <p>
 * Replayability does not imply thread-safety. The caller must not use data accessors simultaneously
 * regardless of what this method returns.
 *
 * @return A flag indicating whether the content can be repeatedly consumed using all accessors.
 */
public boolean isReplayable() {
return content.isReplayable();
}
/**
 * Converts this {@link BinaryData} into one that is replayable, i.e. whose content can be consumed
 * repeatedly using all accessors.
 * <p>
 * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
 * resetting a stream or buffering in memory are employed to assure replayability.
 * <p>
 * Replayability does not imply thread-safety. The caller must not use data accessors of the returned
 * {@link BinaryData} simultaneously.
 *
 * @return Replayable {@link BinaryData}.
 */
public BinaryData toReplayableBinaryData() {
// Already-replayable content needs no wrapping; otherwise wrap a replayable copy of the content.
return isReplayable() ? this : new BinaryData(content.toReplayableContent());
}
/**
 * Asynchronously converts this {@link BinaryData} into one that is replayable, i.e. whose content can be
 * consumed repeatedly using all accessors.
 * <p>
 * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
 * resetting a stream or buffering in memory are employed to assure replayability.
 * <p>
 * Replayability does not imply thread-safety. The caller must not use data accessors of the returned
 * {@link BinaryData} simultaneously.
 *
 * @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}.
 */
public Mono<BinaryData> toReplayableBinaryDataAsync() {
// Emit this instance directly when already replayable; otherwise build one from replayable content.
return this.isReplayable()
? Mono.just(this)
: content.toReplayableContentAsync().map(BinaryData::new);
}
} | class BinaryData {
// Logger for surfacing argument-validation errors through monoError.
private static final ClientLogger LOGGER = new ClientLogger(BinaryData.class);
// Default JSON serializer; created once and shared by all default (de)serialization paths.
static final JsonSerializer SERIALIZER = JsonSerializerProviders.createInstance(true);
// Maximum safe array size; Integer.MAX_VALUE minus JVM array-header slack.
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// The backing content abstraction; all accessors delegate to it.
private final BinaryDataContent content;
/**
 * Creates a {@link BinaryData} wrapping the given content.
 *
 * @param content The backing {@link BinaryDataContent}; must not be null.
 * @throws NullPointerException If {@code content} is null.
 */
BinaryData(BinaryDataContent content) {
this.content = Objects.requireNonNull(content, "'content' cannot be null.");
}
// Registers an accessor so azure-core internals (via BinaryDataHelper) can create BinaryData instances
// from BinaryDataContent and read the private content field without widening this class's API.
static {
BinaryDataHelper.setAccessor(new BinaryDataHelper.BinaryDataAccessor() {
@Override
public BinaryData createBinaryData(BinaryDataContent content) {
return new BinaryData(content);
}
@Override
public BinaryDataContent getContent(BinaryData binaryData) {
return binaryData.content;
}
});
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link InputStream}. Depending on the type of
 * inputStream, the BinaryData instance created may or may not allow reading the content more than once. The
 * stream content is not cached if the stream is not read into a format that requires the content to be fully
 * read into memory.
 * <p>
 * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
 *
 * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link InputStream}.
 * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
 * @throws NullPointerException If {@code inputStream} is null.
 */
public static BinaryData fromStream(InputStream inputStream) {
// Length unknown; delegate with null so it is determined lazily (or never).
return fromStream(inputStream, null);
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link InputStream} with a known length.
 * Depending on the type of inputStream, the BinaryData instance created may or may not allow reading the
 * content more than once. The stream content is not cached if the stream is not read into a format that
 * requires the content to be fully read into memory.
 * <p>
 * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
 *
 * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or null if unknown.
 * @return A {@link BinaryData} representing the {@link InputStream}.
 * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
 * @throws NullPointerException If {@code inputStream} is null.
 */
public static BinaryData fromStream(InputStream inputStream, Long length) {
return new BinaryData(new InputStreamContent(inputStream, length));
}
/**
 * Lazily creates an instance of {@link BinaryData} from the given {@link InputStream}.
 * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
 *
 * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
 * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
 * @throws NullPointerException If {@code inputStream} is null.
 */
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream) {
return fromStreamAsync(inputStream, null);
}
/**
 * Lazily creates an instance of {@link BinaryData} from the given {@link InputStream} with a known length.
 * <b>NOTE:</b> The {@link InputStream} is not closed by this function.
 *
 * @param inputStream The {@link InputStream} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or null if unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link InputStream}.
 * @throws UncheckedIOException If any error happens while reading the {@link InputStream}.
 * @throws NullPointerException If {@code inputStream} is null.
 */
public static Mono<BinaryData> fromStreamAsync(InputStream inputStream, Long length) {
// Defer creation until subscription time.
return Mono.fromCallable(() -> fromStream(inputStream, length));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}.
 * <p>
 * This method aggregates data into a single byte array.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data) {
return fromFlux(data, null);
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer} with a known
 * length.
 * <p>
 * This method aggregates data into a single byte array.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or null if unknown.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws IllegalArgumentException If the length is less than zero.
 * @throws NullPointerException If {@code data} is null.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length) {
// Buffer eagerly by default.
return fromFlux(data, length, true);
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link Flux} of {@link ByteBuffer}, optionally
 * buffering the content eagerly.
 * <p>
 * If {@code bufferContent} is true and {@code length} is null, the length of the returned
 * {@link BinaryData} will be based on the length calculated by buffering. If {@code length} is non-null it
 * will always be used as the {@link BinaryData} length even if buffering determines a different length.
 *
 * @param data The {@link Flux} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @param length The length of {@code data} in bytes, or null if unknown.
 * @param bufferContent A flag indicating whether {@link Flux} should be buffered eagerly or consumption
 * deferred.
 * @return A {@link Mono} of {@link BinaryData} representing the {@link Flux} of {@link ByteBuffer}.
 * @throws IllegalArgumentException If the length is less than zero.
 * @throws NullPointerException If {@code data} is null.
 */
public static Mono<BinaryData> fromFlux(Flux<ByteBuffer> data, Long length, boolean bufferContent) {
// Argument validation is surfaced as error signals, not synchronous throws.
if (data == null) {
return monoError(LOGGER, new NullPointerException("'data' cannot be null."));
}
if (length != null && length < 0) {
return monoError(LOGGER, new IllegalArgumentException("'length' cannot be less than 0."));
}
// Deferred mode: wrap the flux as-is; it is consumed only when the BinaryData is read.
if (!bufferContent) {
return Mono.just(new BinaryData(new FluxByteBufferContent(data, length)));
}
// Eager mode: deep-copy each buffer (so later mutation of the source buffers cannot affect us) while
// accumulating the observed byte count. A single-element array is used because lambdas cannot mutate
// captured locals.
long[] trueLength = new long[]{0};
return data.map(buffer -> {
int bufferSize = buffer.remaining();
ByteBuffer copy = ByteBuffer.allocate(bufferSize);
trueLength[0] += bufferSize;
copy.put(buffer);
copy.flip();
return copy;
})
.collect(LinkedList::new, (BiConsumer<LinkedList<ByteBuffer>, ByteBuffer>) LinkedList::add)
.map(buffers -> {
// Re-expose the buffered content as a replayable flux; duplicate() gives each consumer
// independent position/limit. An explicit length, when provided, wins over the computed one.
return new BinaryData(new FluxByteBufferContent(Flux.fromIterable(buffers).map(ByteBuffer::duplicate),
(length != null) ? length : trueLength[0], true));
});
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link String}.
 * <p>
 * The {@link String} is converted into bytes using UTF-8 encoding.
 *
 * @param data The {@link String} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link String}.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromString(String data) {
return new BinaryData(new StringContent(data));
}
/**
 * Creates an instance of {@link BinaryData} from the given byte array.
 * <p>
 * If the byte array is zero length an empty {@link BinaryData} will be returned. Note that the input byte
 * array is used as a reference by this instance of {@link BinaryData} and any changes to the byte array
 * outside of this instance will result in the contents of this BinaryData instance being updated as well.
 * To safely update the byte array without impacting the BinaryData instance, perform an array copy first.
 *
 * @param data The byte array that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the byte array.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromBytes(byte[] data) {
return new BinaryData(new ByteArrayContent(data));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link ByteBuffer}.
 * <p>
 * If the {@link ByteBuffer} is zero length an empty {@link BinaryData} will be returned. Note that the
 * input {@link ByteBuffer} is used as a reference by this instance of {@link BinaryData} and any changes to
 * the {@link ByteBuffer} outside of this instance will result in the contents of this BinaryData instance
 * being updated as well. To safely update the {@link ByteBuffer} without impacting the BinaryData instance,
 * perform a copy first.
 *
 * @param data The {@link ByteBuffer} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link ByteBuffer}.
 * @throws NullPointerException If {@code data} is null.
 */
public static BinaryData fromByteBuffer(ByteBuffer data) {
return new BinaryData(new ByteBufferContent(data));
}
/**
 * Creates an instance of {@link BinaryData} from the given {@link List} of {@link ByteBuffer}.
 * <p>
 * The input {@link ByteBuffer} instances are used as a reference by this instance of {@link BinaryData} and
 * any changes to a {@link ByteBuffer} outside of this instance will result in the contents of this
 * BinaryData instance being updated as well. To safely update the buffers without impacting the BinaryData
 * instance, perform a copy first.
 *
 * @param data The {@link List} of {@link ByteBuffer} that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the {@link List} of {@link ByteBuffer}.
 */
public static BinaryData fromListByteBuffer(List<ByteBuffer> data) {
return new BinaryData(new ListByteBufferContent(data));
}
/**
 * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
 * {@link JsonSerializer}.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the
 * classpath. If no implementation is found, a default Jackson-based implementation will be used to
 * serialize the object.
 *
 * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
 * @return A {@link BinaryData} representing the JSON serialized object.
 * @throws NullPointerException If {@code data} is null.
 * @see JsonSerializer
 */
public static BinaryData fromObject(Object data) {
return fromObject(data, SERIALIZER);
}
/**
 * Lazily creates an instance of {@link BinaryData} by serializing the {@link Object} using the default
 * {@link JsonSerializer}.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the
 * classpath. If no implementation is found, a default Jackson-based implementation will be used to
 * serialize the object.
 *
 * @param data The object that will be JSON serialized that {@link BinaryData} will represent.
 * @return A {@link Mono} of {@link BinaryData} representing the JSON serialized object.
 * @see JsonSerializer
 */
public static Mono<BinaryData> fromObjectAsync(Object data) {
return fromObjectAsync(data, SERIALIZER);
}
/**
 * Creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
 * {@link ObjectSerializer}.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
 * your own implementation.
 *
 * @param data The object that will be serialized that {@link BinaryData} will represent. The
 * {@code serializer} determines how {@code null} data is serialized.
 * @param serializer The {@link ObjectSerializer} used to serialize the object.
 * @return A {@link BinaryData} representing the serialized object.
 * @throws NullPointerException If {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public static BinaryData fromObject(Object data, ObjectSerializer serializer) {
return new BinaryData(new SerializableContent(data, serializer));
}
/**
 * Lazily creates an instance of {@link BinaryData} by serializing the {@link Object} using the passed
 * {@link ObjectSerializer}.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or
 * your own implementation.
 *
 * @param data The object that will be serialized that {@link BinaryData} will represent. The
 * {@code serializer} determines how {@code null} data is serialized.
 * @param serializer The {@link ObjectSerializer} used to serialize the object.
 * @return A {@link Mono} of {@link BinaryData} representing the serialized object.
 * @throws NullPointerException If {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public static Mono<BinaryData> fromObjectAsync(Object data, ObjectSerializer serializer) {
// Defer serialization until subscription time.
return Mono.fromCallable(() -> fromObject(data, serializer));
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path} as its data. This method
 * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
 * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
 * <p>
 * The {@link BinaryData} returned from this method uses the default (8KB) chunk size when reading file
 * content.
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 */
public static BinaryData fromFile(Path file) {
return fromFile(file, STREAM_READ_SIZE);
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data, read in
 * chunks of the requested size. This method checks for the existence of the file at the time of creating an
 * instance of {@link BinaryData}. The file, however, is not read until there is an attempt to read the
 * contents of the returned BinaryData instance.
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @param chunkSize The requested size for each read of the path.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to 0 (validation is
 * presumably performed by {@code FileContent} — confirm against its implementation).
 * @throws UncheckedIOException If the file does not exist.
 */
public static BinaryData fromFile(Path file, int chunkSize) {
// No position/length restriction: read the whole file.
return new BinaryData(new FileContent(file, chunkSize, null, null));
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
 * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
 * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
 *
 * <p>The {@link BinaryData} returned from this method uses an 8KB chunk size when reading file content.</p>
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @param position Position, or offset, within the path where reading begins.
 * @param length Maximum number of bytes to be read from the path.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 * @throws IllegalArgumentException If {@code position} or {@code length} are negative or {@code position} plus
 * {@code length} is greater than the file size.
 * @throws UncheckedIOException if the file does not exist.
 */
public static BinaryData fromFile(Path file, Long position, Long length) {
    // Same as the four-argument overload with the default stream read size (8KB).
    return new BinaryData(new FileContent(file, STREAM_READ_SIZE, position, length));
}
/**
 * Creates a {@link BinaryData} that uses the content of the file at {@link Path file} as its data. This method
 * checks for the existence of the file at the time of creating an instance of {@link BinaryData}. The file,
 * however, is not read until there is an attempt to read the contents of the returned BinaryData instance.
 *
 * @param file The {@link Path} that will be the {@link BinaryData} data.
 * @param position Position, or offset, within the path where reading begins.
 * @param length Maximum number of bytes to be read from the path.
 * @param chunkSize The requested size for each read of the path.
 * @return A new {@link BinaryData}.
 * @throws NullPointerException If {@code file} is null.
 * @throws IllegalArgumentException If {@code position} or {@code length} are negative or {@code position} plus
 * {@code length} is greater than the file size or {@code chunkSize} is less than or equal to 0.
 * @throws UncheckedIOException if the file does not exist.
 */
public static BinaryData fromFile(Path file, Long position, Long length, int chunkSize) {
    // Fully-parameterized form: windowed read [position, position + length) in chunkSize reads.
    return new BinaryData(new FileContent(file, chunkSize, position, length));
}
/**
 * Returns a byte array representation of this {@link BinaryData}.
 * <p>
 * This method returns a reference to the underlying byte array. Modifying the contents of the returned byte array
 * may change the content of this BinaryData instance. If the content source of this BinaryData instance is a file,
 * an {@link InputStream}, or a {@code Flux<ByteBuffer>} the source is not modified. To safely update the byte
 * array, it is recommended to make a copy of the contents first.
 * <p>
 * If the {@link BinaryData} is larger than the maximum size allowed for a {@code byte[]} this will throw an
 * {@link IllegalStateException}.
 *
 * @return A byte array representing this {@link BinaryData}.
 * @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
 * {@code byte[]}.
 */
public byte[] toBytes() {
    // Delegates to the content implementation; no defensive copy is made here.
    return content.toBytes();
}
/**
 * Returns a {@link String} representation of this {@link BinaryData} by converting its data using the UTF-8
 * character set. A new instance of String is created each time this method is called.
 * <p>
 * If the {@link BinaryData} is larger than the maximum size allowed for a {@link String} this will throw an
 * {@link IllegalStateException}.
 *
 * @return A {@link String} representing this {@link BinaryData}.
 * @throws IllegalStateException If the {@link BinaryData} is larger than the maximum size allowed for a
 * {@link String}.
 */
@Override
public String toString() {
    // This overrides Object.toString; the UTF-8 conversion is implemented by the content type.
    return content.toString();
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference}-based overload instead.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param <T> Type of the deserialized Object.
 * @param clazz The {@link Class} representing the Object's type.
 * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} is null.
 * @see JsonSerializer
 */
public <T> T toObject(Class<T> clazz) {
    // Wrap the Class in a TypeReference and use the shared default serializer.
    return toObject(TypeReference.createInstance(clazz), SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic a {@link TypeReference} created
 * from the {@link Class} may be used.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} is null.
 * @see JsonSerializer
 */
public <T> T toObject(TypeReference<T> typeReference) {
    // Delegate using the shared default serializer.
    return toObject(typeReference, SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference}-based overload instead.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> T toObject(Class<T> clazz, ObjectSerializer serializer) {
    // Wrap the Class in a TypeReference; null checks happen in the delegated overload.
    return toObject(TypeReference.createInstance(clazz), serializer);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic a {@link TypeReference} created
 * from the {@link Class} may be used.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return An {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> T toObject(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Fail fast with descriptive NPEs before any deserialization work; typeReference is checked first.
    Objects.requireNonNull(typeReference, "'typeReference' cannot be null.");
    Objects.requireNonNull(serializer, "'serializer' cannot be null.");
    return content.toObject(typeReference, serializer);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference}-based overload instead.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} is null.
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(Class<T> clazz) {
    // Wrap the Class in a TypeReference and use the shared default serializer.
    return toObjectAsync(TypeReference.createInstance(clazz), SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the default
 * {@link JsonSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic a {@link TypeReference} created
 * from the {@link Class} may be used.
 * <p>
 * <b>Note:</b> This method first looks for a {@link JsonSerializerProvider} implementation on the classpath. If no
 * implementation is found, a default Jackson-based implementation will be used to deserialize the object.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the JSON deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} is null.
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference) {
    // Delegate using the shared default serializer.
    return toObjectAsync(typeReference, SERIALIZER);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link Class}, should be a non-generic class; for generic classes use the
 * {@link TypeReference}-based overload instead.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 *
 * @param clazz The {@link Class} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code clazz} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(Class<T> clazz, ObjectSerializer serializer) {
    // Wrap the Class in a TypeReference; null checks happen in the delegated overload.
    return toObjectAsync(TypeReference.createInstance(clazz), serializer);
}
/**
 * Returns an {@link Object} representation of this {@link BinaryData} by deserializing its data using the passed
 * {@link ObjectSerializer}. Each time this method is called, the content is deserialized and a new instance of type
 * {@code T} is returned. So, calling this method repeatedly to convert the underlying data source into the same
 * type is not recommended.
 * <p>
 * The type, represented by {@link TypeReference}, can either be a generic or non-generic type. If the type is
 * generic create a sub-type of {@link TypeReference}; if the type is non-generic a {@link TypeReference} created
 * from the {@link Class} may be used.
 * <p>
 * The passed {@link ObjectSerializer} can either be one of the implementations offered by the Azure SDKs or your
 * own implementation.
 *
 * @param typeReference The {@link TypeReference} representing the Object's type.
 * @param serializer The {@link ObjectSerializer} used to deserialize object.
 * @param <T> Type of the deserialized Object.
 * @return A {@link Mono} of {@link Object} representing the deserialized {@link BinaryData}.
 * @throws NullPointerException If {@code typeReference} or {@code serializer} is null.
 * @see ObjectSerializer
 * @see JsonSerializer
 */
public <T> Mono<T> toObjectAsync(TypeReference<T> typeReference, ObjectSerializer serializer) {
    // Mono.fromCallable defers the work: deserialization (and the null-argument validation performed
    // by the synchronous toObject) happens on subscription, not when this method is called.
    return Mono.fromCallable(() -> toObject(typeReference, serializer));
}
/**
 * Returns an {@link InputStream} representation of this {@link BinaryData}.
 *
 * @return An {@link InputStream} representing the {@link BinaryData}.
 */
public InputStream toStream() {
    // Delegates to the content implementation.
    return content.toStream();
}
/**
 * Returns a read-only {@link ByteBuffer} representation of this {@link BinaryData}.
 * <p>
 * Attempting to mutate the returned {@link ByteBuffer} will throw a {@link ReadOnlyBufferException}.
 *
 * @return A read-only {@link ByteBuffer} representing the {@link BinaryData}.
 */
public ByteBuffer toByteBuffer() {
    // Delegates to the content implementation, which provides the read-only view.
    return content.toByteBuffer();
}
/**
 * Returns the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}. The content
 * is not read from the underlying data source until the {@link Flux} is subscribed to.
 *
 * @return the content of this {@link BinaryData} instance as a flux of {@link ByteBuffer ByteBuffers}.
 */
public Flux<ByteBuffer> toFluxByteBuffer() {
    // Delegates to the content implementation; reading is deferred to subscription.
    return content.toFluxByteBuffer();
}
/**
 * Writes the contents of this {@link BinaryData} to the given {@link OutputStream}.
 * <p>
 * This method does not close the {@link OutputStream}.
 * <p>
 * The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
 * replayable ({@link BinaryData#isReplayable()}), its contents may not be able to be read again. If it needs to be
 * read again, use {@link BinaryData#toReplayableBinaryData()} to create a replayable copy first.
 *
 * @param outputStream The {@link OutputStream} to write the contents of this {@link BinaryData} to.
 * @throws NullPointerException If {@code outputStream} is null.
 * @throws IOException If an I/O error occurs.
 */
// NOTE(review): the method body documented above (writeTo(OutputStream)) appears to be missing from this
// chunk — confirm against the original source whether it was lost in extraction.
/**
 * Writes the contents of this {@link BinaryData} to the given {@link WritableByteChannel}.
 * <p>
 * This method does not close the {@link WritableByteChannel}.
 * <p>
 * The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
 * replayable ({@link BinaryData#isReplayable()}), its contents may not be able to be read again. If it needs to be
 * read again, use {@link BinaryData#toReplayableBinaryData()} to create a replayable copy first.
 *
 * @param channel The {@link WritableByteChannel} to write the contents of this {@link BinaryData} to.
 * @throws NullPointerException If {@code channel} is null.
 * @throws IOException If an I/O error occurs.
 */
public void writeTo(WritableByteChannel channel) throws IOException {
    // Fail fast with a descriptive NPE before touching the content.
    Objects.requireNonNull(channel, "'channel' cannot be null.");
    content.writeTo(channel);
}
/**
 * Writes the contents of this {@link BinaryData} to the given {@link AsynchronousByteChannel}.
 * <p>
 * This method does not close the {@link AsynchronousByteChannel}.
 * <p>
 * The contents of this {@link BinaryData} will be written without buffering. If the underlying data source isn't
 * replayable ({@link BinaryData#isReplayable()}), its contents may not be able to be read again. If it needs to be
 * read again, use {@link BinaryData#toReplayableBinaryData()} to create a replayable copy first.
 *
 * @param channel The {@link AsynchronousByteChannel} to write the contents of this {@link BinaryData} to.
 * @return A {@link Mono} the completes once content has been written or had an error writing.
 * @throws NullPointerException If {@code channel} is null.
 */
public Mono<Void> writeTo(AsynchronousByteChannel channel) {
    // Eager null check for consistency with the synchronous WritableByteChannel overload and with the
    // documented @throws NullPointerException contract.
    Objects.requireNonNull(channel, "'channel' cannot be null.");
    return content.writeTo(channel);
}
/**
 * Returns the length of the content, if it is known. The length can be {@code null} if the source did not specify
 * the length or the length cannot be determined without reading the whole content.
 *
 * @return the length of the content, if it is known; {@code null} otherwise.
 */
public Long getLength() {
    // Delegates to the content implementation.
    return content.getLength();
}
/**
 * Returns a flag indicating whether the content can be repeatedly consumed using all accessors, including the
 * stream, flux, and byte accessors.
 *
 * <p>
 * Replayability does not imply thread-safety. The caller must not use data accessors simultaneously regardless of
 * what this method returns.
 * </p>
 *
 * @return a flag indicating whether the content can be repeatedly consumed using all accessors.
 */
public boolean isReplayable() {
    // Delegates to the content implementation.
    return content.isReplayable();
}
/**
 * Converts this {@link BinaryData} into one whose content can be consumed repeatedly using all accessors.
 *
 * <p>
 * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
 * resetting a stream or buffering in memory are employed to assure replayability.
 * </p>
 *
 * <p>
 * Replayability does not imply thread-safety. The caller must not use data accessors of the returned
 * {@link BinaryData} simultaneously.
 * </p>
 *
 * @return Replayable {@link BinaryData}.
 */
public BinaryData toReplayableBinaryData() {
    // Already-replayable data is returned unchanged; otherwise wrap a replayable copy of the content.
    return isReplayable() ? this : new BinaryData(content.toReplayableContent());
}
/**
 * Asynchronously converts this {@link BinaryData} into one whose content can be consumed repeatedly using all
 * accessors.
 *
 * <p>
 * A {@link BinaryData} that is already replayable is returned as is. Otherwise techniques like marking and
 * resetting a stream or buffering in memory are employed to assure replayability.
 * </p>
 *
 * <p>
 * Replayability does not imply thread-safety. The caller must not use data accessors of the returned
 * {@link BinaryData} simultaneously.
 * </p>
 *
 * @return A {@link Mono} of {@link BinaryData} representing the replayable {@link BinaryData}.
 */
public Mono<BinaryData> toReplayableBinaryDataAsync() {
    // Already-replayable data is emitted unchanged; otherwise a replayable copy of the content is
    // produced asynchronously and rewrapped.
    return isReplayable()
        ? Mono.just(this)
        : content.toReplayableContentAsync().map(BinaryData::new);
}
} |
Since the previous logic threw this type of exception, changing it may be a breaking change.
if (!body.isReplayable() && !BODY_SENT_UPDATER.compareAndSet(this, 0, 1)) {
throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send body is not supported."));
} else {
body.writeTo(bufferedSink);
}
} | throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send body is not supported.")); | public void writeTo(BufferedSink bufferedSink) throws IOException {
if (!body.isReplayable() && !BODY_SENT_UPDATER.compareAndSet(this, 0, 1)) {
throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send body is not supported."));
} else {
body.writeTo(bufferedSink);
}
} | class BinaryDataRequestBody extends RequestBody {
private static final ClientLogger LOGGER = new ClientLogger(BinaryDataRequestBody.class);
private final MediaType contentType;
private final BinaryData body;
private final long effectiveContentLength;
private volatile int bodySent = 0;
private static final AtomicIntegerFieldUpdater<BinaryDataRequestBody> BODY_SENT_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(BinaryDataRequestBody.class, "bodySent");
/**
* Creates a new instance of the BinaryDataRequestBody class.
*
* @param body The {@link BinaryData} to use as the body.
* @param contentType The content type of the body.
* @param effectiveContentLength The length of the body.
*/
public BinaryDataRequestBody(BinaryData body, MediaType contentType, long effectiveContentLength) {
this.body = body;
this.contentType = contentType;
this.effectiveContentLength = effectiveContentLength;
}
@Override
public long contentLength() throws IOException {
return effectiveContentLength;
}
@Override
public boolean isOneShot() {
return !body.isReplayable();
}
@Override
public MediaType contentType() {
return contentType;
}
@Override
} | class BinaryDataRequestBody extends RequestBody {
private static final ClientLogger LOGGER = new ClientLogger(BinaryDataRequestBody.class);
private final MediaType contentType;
private final BinaryData body;
private final long effectiveContentLength;
private volatile int bodySent = 0;
private static final AtomicIntegerFieldUpdater<BinaryDataRequestBody> BODY_SENT_UPDATER =
AtomicIntegerFieldUpdater.newUpdater(BinaryDataRequestBody.class, "bodySent");
/**
* Creates a new instance of the BinaryDataRequestBody class.
*
* @param body The {@link BinaryData} to use as the body.
* @param contentType The content type of the body.
* @param effectiveContentLength The length of the body.
*/
public BinaryDataRequestBody(BinaryData body, MediaType contentType, long effectiveContentLength) {
this.body = body;
this.contentType = contentType;
this.effectiveContentLength = effectiveContentLength;
}
@Override
public long contentLength() throws IOException {
return effectiveContentLength;
}
@Override
public boolean isOneShot() {
return !body.isReplayable();
}
@Override
public MediaType contentType() {
return contentType;
}
@Override
} |
nit ```suggestion logger.atWarning() .addKeyValue("headerName", headerName) .log("The header is restricted by default in JDK HttpClient 12 " + "and will be ignored. To allow this header to be set on the request, configure the " + "restricted headers on the HttpPipelinePolicy using " + "HttpPipelinePolicyBuilder#addRestrictedHeaders."); ``` | public void forEach(BiConsumer<? super String, ? super List<String>> action) {
rawHeaders.forEach((headerName, header) -> {
if (restrictedHeaders.contains(headerName)) {
logger.warning("The header '" + headerName + "' is restricted by default in JDK HttpClient 12 "
+ "and will be ignored. To allow this header to be set on the request, configure the "
+ "restricted headers on the HttpPipelinePolicy using "
+ "HttpPipelinePolicyBuilder
} else {
action.accept(header.getName(), header.getValuesList());
}
});
} | + "HttpPipelinePolicyBuilder | public void forEach(BiConsumer<? super String, ? super List<String>> action) {
rawHeaders.forEach((headerName, header) -> {
if (restrictedHeaders.contains(headerName)) {
logger.atWarning()
.addKeyValue("headerName", headerName)
.log("The header is restricted by 'java.net.http.HttpClient' and will be ignored. To allow this "
+ "header to be set on the request, configure 'jdk.httpclient.allowRestrictedHeaders' with the "
+ "header added in the comma-separated list.");
} else {
action.accept(header.getName(), header.getValuesList());
}
});
} | class HeaderFilteringMap extends AbstractMap<String, List<String>> {
private final Map<String, HttpHeader> rawHeaders;
private final Set<String> restrictedHeaders;
private final ClientLogger logger;
/**
* Creates a new HeaderFilteringMap.
*
* @param rawHeaders The raw headers map.
* @param restrictedHeaders The header filter.
* @param logger The logger to log any errors.
*/
HeaderFilteringMap(Map<String, HttpHeader> rawHeaders, Set<String> restrictedHeaders, ClientLogger logger) {
this.rawHeaders = rawHeaders;
this.restrictedHeaders = restrictedHeaders;
this.logger = logger;
}
@Override
public Set<Entry<String, List<String>>> entrySet() {
throw logger.logExceptionAsError(
new UnsupportedOperationException("The only operation permitted by this Map is forEach."));
}
@Override
} | class HeaderFilteringMap extends AbstractMap<String, List<String>> {
private final Map<String, HttpHeader> rawHeaders;
private final Set<String> restrictedHeaders;
private final ClientLogger logger;
/**
* Creates a new HeaderFilteringMap.
*
* @param rawHeaders The raw headers map.
* @param restrictedHeaders The header filter.
* @param logger The logger to log any errors.
*/
HeaderFilteringMap(Map<String, HttpHeader> rawHeaders, Set<String> restrictedHeaders, ClientLogger logger) {
this.rawHeaders = rawHeaders;
this.restrictedHeaders = restrictedHeaders;
this.logger = logger;
}
@Override
public Set<Entry<String, List<String>>> entrySet() {
throw logger.logExceptionAsError(
new UnsupportedOperationException("The only operation permitted by this Map is forEach."));
}
@Override
} |
We generally don't use `block()` in async samples as that doesn't really show how to use the asynchronous APIs. `block()` just converts this into a sync call. Take a look at some of the examples below: - [Sentiment Analysis](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/textanalytics/azure-ai-textanalytics/src/samples/java/com/azure/ai/textanalytics/AnalyzeSentimentAsync.java#L30-L44) - [App Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/appconfiguration/azure-data-appconfiguration/src/samples/java/com/azure/data/appconfiguration/HelloWorldAsync.java#L51-L62) - [Key Vault key creation](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/keyvault/azure-security-keyvault-keys/src/samples/java/com/azure/security/keyvault/keys/HelloWorldAsync.java#L76-L81) | public static void main(String[] args) {
String endpoint = System.getenv("VISION_ENDPOINT");
String key = System.getenv("VISION_KEY");
if (endpoint == null || key == null) {
System.out.println("Missing environment variable 'VISION_ENDPOINT' or 'VISION_KEY'.");
System.out.println("Set them before running this sample.");
System.exit(1);
}
ImageAnalysisAsyncClient client = new ImageAnalysisClientBuilder()
.endpoint(endpoint)
.credential(new KeyCredential(key))
.buildAsyncClient();
try {
Mono<ImageAnalysisResult> result = client.analyze(
new URL("https:
Arrays.asList(VisualFeatures.READ),
null);
printAnalysisResults(result.block());
} catch (Exception e) {
e.printStackTrace();
}
} | printAnalysisResults(result.block()); | public static void main(String[] args) throws MalformedURLException, InterruptedException {
String endpoint = System.getenv("VISION_ENDPOINT");
String key = System.getenv("VISION_KEY");
if (endpoint == null || key == null) {
System.out.println("Missing environment variable 'VISION_ENDPOINT' or 'VISION_KEY'.");
System.out.println("Set them before running this sample.");
System.exit(1);
}
ImageAnalysisAsyncClient client = new ImageAnalysisClientBuilder()
.endpoint(endpoint)
.credential(new KeyCredential(key))
.buildAsyncClient();
client.analyze(
new URL("https:
Arrays.asList(VisualFeatures.READ),
null)
.subscribe(
result -> printAnalysisResults(result),
error -> System.err.println("Image analysis terminated with error message: " + error));
TimeUnit.SECONDS.sleep(5);
} | class SampleOcrImageUrlAsync {
public static void printAnalysisResults(ImageAnalysisResult result) {
System.out.println("Image analysis results:");
System.out.println(" Read:");
for (DetectedTextLine line : result.getRead().getBlocks().get(0).getLines()) {
System.out.println(" Line: '" + line.getText()
+ "', Bounding polygon " + line.getBoundingPolygon());
for (DetectedTextWord word : line.getWords()) {
System.out.println(" Word: '" + word.getText()
+ "', Bounding polygon " + word.getBoundingPolygon()
+ ", Confidence " + String.format("%.4f", word.getConfidence()));
}
}
System.out.println(" Image height = " + result.getMetadata().getHeight());
System.out.println(" Image width = " + result.getMetadata().getWidth());
System.out.println(" Model version = " + result.getModelVersion());
}
} | class SampleOcrImageUrlAsync {
public static void printAnalysisResults(ImageAnalysisResult result) {
System.out.println("Image analysis results:");
System.out.println(" Read:");
for (DetectedTextLine line : result.getRead().getBlocks().get(0).getLines()) {
System.out.println(" Line: '" + line.getText()
+ "', Bounding polygon " + line.getBoundingPolygon());
for (DetectedTextWord word : line.getWords()) {
System.out.println(" Word: '" + word.getText()
+ "', Bounding polygon " + word.getBoundingPolygon()
+ ", Confidence " + String.format("%.4f", word.getConfidence()));
}
}
System.out.println(" Image height = " + result.getMetadata().getHeight());
System.out.println(" Image width = " + result.getMetadata().getWidth());
System.out.println(" Model version = " + result.getModelVersion());
}
} |
Thanks @srnagar, I made the changes based on the examples you provided. | public static void main(String[] args) {
String endpoint = System.getenv("VISION_ENDPOINT");
String key = System.getenv("VISION_KEY");
if (endpoint == null || key == null) {
System.out.println("Missing environment variable 'VISION_ENDPOINT' or 'VISION_KEY'.");
System.out.println("Set them before running this sample.");
System.exit(1);
}
ImageAnalysisAsyncClient client = new ImageAnalysisClientBuilder()
.endpoint(endpoint)
.credential(new KeyCredential(key))
.buildAsyncClient();
try {
Mono<ImageAnalysisResult> result = client.analyze(
new URL("https:
Arrays.asList(VisualFeatures.READ),
null);
printAnalysisResults(result.block());
} catch (Exception e) {
e.printStackTrace();
}
} | printAnalysisResults(result.block()); | public static void main(String[] args) throws MalformedURLException, InterruptedException {
String endpoint = System.getenv("VISION_ENDPOINT");
String key = System.getenv("VISION_KEY");
if (endpoint == null || key == null) {
System.out.println("Missing environment variable 'VISION_ENDPOINT' or 'VISION_KEY'.");
System.out.println("Set them before running this sample.");
System.exit(1);
}
ImageAnalysisAsyncClient client = new ImageAnalysisClientBuilder()
.endpoint(endpoint)
.credential(new KeyCredential(key))
.buildAsyncClient();
client.analyze(
new URL("https:
Arrays.asList(VisualFeatures.READ),
null)
.subscribe(
result -> printAnalysisResults(result),
error -> System.err.println("Image analysis terminated with error message: " + error));
TimeUnit.SECONDS.sleep(5);
} | class SampleOcrImageUrlAsync {
public static void printAnalysisResults(ImageAnalysisResult result) {
System.out.println("Image analysis results:");
System.out.println(" Read:");
for (DetectedTextLine line : result.getRead().getBlocks().get(0).getLines()) {
System.out.println(" Line: '" + line.getText()
+ "', Bounding polygon " + line.getBoundingPolygon());
for (DetectedTextWord word : line.getWords()) {
System.out.println(" Word: '" + word.getText()
+ "', Bounding polygon " + word.getBoundingPolygon()
+ ", Confidence " + String.format("%.4f", word.getConfidence()));
}
}
System.out.println(" Image height = " + result.getMetadata().getHeight());
System.out.println(" Image width = " + result.getMetadata().getWidth());
System.out.println(" Model version = " + result.getModelVersion());
}
} | class SampleOcrImageUrlAsync {
public static void printAnalysisResults(ImageAnalysisResult result) {
System.out.println("Image analysis results:");
System.out.println(" Read:");
for (DetectedTextLine line : result.getRead().getBlocks().get(0).getLines()) {
System.out.println(" Line: '" + line.getText()
+ "', Bounding polygon " + line.getBoundingPolygon());
for (DetectedTextWord word : line.getWords()) {
System.out.println(" Word: '" + word.getText()
+ "', Bounding polygon " + word.getBoundingPolygon()
+ ", Confidence " + String.format("%.4f", word.getConfidence()));
}
}
System.out.println(" Image height = " + result.getMetadata().getHeight());
System.out.println(" Image width = " + result.getMetadata().getWidth());
System.out.println(" Model version = " + result.getModelVersion());
}
} |
This can be simplified to just add the additional SearchField to `searchFields`. Another option would be adding `SimpleField` or `SearchableField` annotations to the Hotel class. ```java @SimpleField(isKey = true, isFilterable = true, isSortable = true) private String id; @SearchableField(isFilterable = true, isSortable = true) private String name; ``` | public void createIndexUseFieldBuilder() {
List<SearchField> searchFields = SearchIndexClient.buildSearchFields(Hotel.class, null);
List<SearchField> searchFieldList = new ArrayList<>();
searchFieldList.add(new SearchField("hotelId", SearchFieldDataType.STRING)
.setKey(true)
.setFilterable(true)
.setSortable(true));
SEARCH_INDEX_CLIENT.createIndex(new SearchIndex("index1", searchFields).setFields(searchFieldList));
} | searchFieldList.add(new SearchField("hotelId", SearchFieldDataType.STRING) | public void createIndexUseFieldBuilder() {
List<SearchField> searchFields = SearchIndexClient.buildSearchFields(Hotel.class, null);
SEARCH_INDEX_CLIENT.createIndex(new SearchIndex("index", searchFields));
} | class Hotel {
private String id;
private String name;
public String getId() {
return id;
}
public Hotel setId(String id) {
this.id = id;
return this;
}
public String getName() {
return name;
}
public Hotel setName(String name) {
this.name = name;
return this;
}
} | class Hotel {
@SimpleField(isKey = true, isFilterable = true, isSortable = true)
private String id;
@SearchableField(isFilterable = true, isSortable = true)
private String name;
public String getId() {
return id;
}
public Hotel setId(String id) {
this.id = id;
return this;
}
public String getName() {
return name;
}
public Hotel setName(String name) {
this.name = name;
return this;
}
} |
We can do it here. But in SDK, seems won't able to do such in a subclass of `Resource`. Maybe we do need to add a protected ctor. | public static Resource fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Resource resource = new Resource();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("id".equals(fieldName)) {
ProxyResourceAccessHelper.setId(resource, reader.getString());
} else if ("name".equals(fieldName)) {
ProxyResourceAccessHelper.setName(resource, reader.getString());
} else if ("type".equals(fieldName)) {
ProxyResourceAccessHelper.setType(resource, reader.getString());
} else if ("location".equals(fieldName)) {
resource.location = reader.getString();
} else if ("tags".equals(fieldName)) {
resource.tags = reader.readMap(JsonReader::getString);
} else {
reader.skipChildren();
}
}
return resource;
});
} | ProxyResourceAccessHelper.setType(resource, reader.getString()); | public static Resource fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Resource resource = new Resource();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("id".equals(fieldName)) {
ProxyResourceAccessHelper.setId(resource, reader.getString());
} else if ("name".equals(fieldName)) {
ProxyResourceAccessHelper.setName(resource, reader.getString());
} else if ("type".equals(fieldName)) {
ProxyResourceAccessHelper.setType(resource, reader.getString());
} else if ("location".equals(fieldName)) {
resource.location = reader.getString();
} else if ("tags".equals(fieldName)) {
resource.tags = reader.readMap(JsonReader::getString);
} else {
reader.skipChildren();
}
}
return resource;
});
} | class Resource extends ProxyResource {
@JsonProperty(required = true)
private String location;
private Map<String, String> tags;
/**
* Creates an instance of {@link Resource}.
*/
public Resource() {
}
/**
* Get the location value.
*
* @return the geolocation where the resource live.
*/
public String location() {
return this.location;
}
/**
* Set the location value.
*
* @param location the geolocation where the resource live.
* @return the resource itself.
*/
public Resource withLocation(String location) {
this.location = location;
return this;
}
/**
* Get the tags value.
*
* @return the tags of the resource.
*/
public Map<String, String> tags() {
return this.tags;
}
/**
* Set the tags value.
*
* @param tags the tags of the resource.
* @return the resource itself.
*/
public Resource withTags(Map<String, String> tags) {
this.tags = tags;
return this;
}
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
return jsonWriter.writeStartObject()
.writeStringField("location", location)
.writeMapField("tags", tags, JsonWriter::writeString)
.writeEndObject();
}
/**
* Reads a JSON stream into a {@link Resource}.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The {@link Resource} that the JSON stream represented, may return null.
* @throws IOException If a {@link Resource} fails to be read from the {@code jsonReader}.
*/
} | class Resource extends ProxyResource {
@JsonProperty(required = true)
private String location;
private Map<String, String> tags;
/**
* Creates an instance of {@link Resource}.
*/
public Resource() {
}
/**
* Get the location value.
*
* @return the geolocation where the resource live.
*/
public String location() {
return this.location;
}
/**
* Set the location value.
*
* @param location the geolocation where the resource live.
* @return the resource itself.
*/
public Resource withLocation(String location) {
this.location = location;
return this;
}
/**
* Get the tags value.
*
* @return the tags of the resource.
*/
public Map<String, String> tags() {
return this.tags;
}
/**
* Set the tags value.
*
* @param tags the tags of the resource.
* @return the resource itself.
*/
public Resource withTags(Map<String, String> tags) {
this.tags = tags;
return this;
}
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
return jsonWriter.writeStartObject()
.writeStringField("location", location)
.writeMapField("tags", tags, JsonWriter::writeString)
.writeEndObject();
}
/**
* Reads a JSON stream into a {@link Resource}.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The {@link Resource} that the JSON stream represented, may return null.
* @throws IOException If a {@link Resource} fails to be read from the {@code jsonReader}.
*/
} |
Yeah, I was thinking about adding that constructor. Though would we want to share that through all implementations, requiring these three values to be passed through the constructor. | public static Resource fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Resource resource = new Resource();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("id".equals(fieldName)) {
ProxyResourceAccessHelper.setId(resource, reader.getString());
} else if ("name".equals(fieldName)) {
ProxyResourceAccessHelper.setName(resource, reader.getString());
} else if ("type".equals(fieldName)) {
ProxyResourceAccessHelper.setType(resource, reader.getString());
} else if ("location".equals(fieldName)) {
resource.location = reader.getString();
} else if ("tags".equals(fieldName)) {
resource.tags = reader.readMap(JsonReader::getString);
} else {
reader.skipChildren();
}
}
return resource;
});
} | ProxyResourceAccessHelper.setType(resource, reader.getString()); | public static Resource fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
Resource resource = new Resource();
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("id".equals(fieldName)) {
ProxyResourceAccessHelper.setId(resource, reader.getString());
} else if ("name".equals(fieldName)) {
ProxyResourceAccessHelper.setName(resource, reader.getString());
} else if ("type".equals(fieldName)) {
ProxyResourceAccessHelper.setType(resource, reader.getString());
} else if ("location".equals(fieldName)) {
resource.location = reader.getString();
} else if ("tags".equals(fieldName)) {
resource.tags = reader.readMap(JsonReader::getString);
} else {
reader.skipChildren();
}
}
return resource;
});
} | class Resource extends ProxyResource {
@JsonProperty(required = true)
private String location;
private Map<String, String> tags;
/**
* Creates an instance of {@link Resource}.
*/
public Resource() {
}
/**
* Get the location value.
*
* @return the geolocation where the resource live.
*/
public String location() {
return this.location;
}
/**
* Set the location value.
*
* @param location the geolocation where the resource live.
* @return the resource itself.
*/
public Resource withLocation(String location) {
this.location = location;
return this;
}
/**
* Get the tags value.
*
* @return the tags of the resource.
*/
public Map<String, String> tags() {
return this.tags;
}
/**
* Set the tags value.
*
* @param tags the tags of the resource.
* @return the resource itself.
*/
public Resource withTags(Map<String, String> tags) {
this.tags = tags;
return this;
}
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
return jsonWriter.writeStartObject()
.writeStringField("location", location)
.writeMapField("tags", tags, JsonWriter::writeString)
.writeEndObject();
}
/**
* Reads a JSON stream into a {@link Resource}.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The {@link Resource} that the JSON stream represented, may return null.
* @throws IOException If a {@link Resource} fails to be read from the {@code jsonReader}.
*/
} | class Resource extends ProxyResource {
@JsonProperty(required = true)
private String location;
private Map<String, String> tags;
/**
* Creates an instance of {@link Resource}.
*/
public Resource() {
}
/**
* Get the location value.
*
* @return the geolocation where the resource live.
*/
public String location() {
return this.location;
}
/**
* Set the location value.
*
* @param location the geolocation where the resource live.
* @return the resource itself.
*/
public Resource withLocation(String location) {
this.location = location;
return this;
}
/**
* Get the tags value.
*
* @return the tags of the resource.
*/
public Map<String, String> tags() {
return this.tags;
}
/**
* Set the tags value.
*
* @param tags the tags of the resource.
* @return the resource itself.
*/
public Resource withTags(Map<String, String> tags) {
this.tags = tags;
return this;
}
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
return jsonWriter.writeStartObject()
.writeStringField("location", location)
.writeMapField("tags", tags, JsonWriter::writeString)
.writeEndObject();
}
/**
* Reads a JSON stream into a {@link Resource}.
*
* @param jsonReader The {@link JsonReader} being read.
* @return The {@link Resource} that the JSON stream represented, may return null.
* @throws IOException If a {@link Resource} fails to be read from the {@code jsonReader}.
*/
} |
Why the ProxyResource instance is not assigned to anything? | private static void ensureAccessor() {
if (accessor == null) {
new ProxyResource();
}
} | new ProxyResource(); | private static void ensureAccessor() {
if (accessor == null) {
new ProxyResource();
}
} | class ProxyResourceAccessHelper {
private static ProxyResourceAccessor accessor;
/**
* Type defining the methods to set the non-public properties of a {@link ProxyResource} instance.
*/
public interface ProxyResourceAccessor {
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
void setId(ProxyResource proxyResource, String id);
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
void setName(ProxyResource proxyResource, String name);
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
void setType(ProxyResource proxyResource, String type);
}
/**
* The method called from {@link ProxyResource} to set it's accessor.
*
* @param proxyResourceAccessor the accessor.
*/
public static void setAccessor(final ProxyResourceAccessor proxyResourceAccessor) {
accessor = proxyResourceAccessor;
}
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
public static void setId(ProxyResource proxyResource, String id) {
ensureAccessor();
accessor.setId(proxyResource, id);
}
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
public static void setName(ProxyResource proxyResource, String name) {
ensureAccessor();
accessor.setName(proxyResource, name);
}
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
public static void setType(ProxyResource proxyResource, String type) {
ensureAccessor();
accessor.setType(proxyResource, type);
}
private ProxyResourceAccessHelper() {
}
} | class ProxyResourceAccessHelper {
private static ProxyResourceAccessor accessor;
/**
* Type defining the methods to set the non-public properties of a {@link ProxyResource} instance.
*/
public interface ProxyResourceAccessor {
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
void setId(ProxyResource proxyResource, String id);
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
void setName(ProxyResource proxyResource, String name);
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
void setType(ProxyResource proxyResource, String type);
}
/**
* The method called from {@link ProxyResource} to set it's accessor.
*
* @param proxyResourceAccessor the accessor.
*/
public static void setAccessor(final ProxyResourceAccessor proxyResourceAccessor) {
accessor = proxyResourceAccessor;
}
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
public static void setId(ProxyResource proxyResource, String id) {
ensureAccessor();
accessor.setId(proxyResource, id);
}
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
public static void setName(ProxyResource proxyResource, String name) {
ensureAccessor();
accessor.setName(proxyResource, name);
}
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
public static void setType(ProxyResource proxyResource, String type) {
ensureAccessor();
accessor.setType(proxyResource, type);
}
private ProxyResourceAccessHelper() {
}
} |
This is a forcing function to load `ProxyResource`. Since the design of accessors requires that the class being accessed is loaded, constructor calls like this could happen before the class is loaded and this forces the class to load. Since the static constructor in `ProxyResource` sets the `ProxyResourceAccessor accessor` field in `ProxyResourceAccessHelper` this check if the accessor is null and then creates a throw away instance of `ProxyResource` to for the static constructor to be called, leading to accessor not being null anymore. | private static void ensureAccessor() {
if (accessor == null) {
new ProxyResource();
}
} | new ProxyResource(); | private static void ensureAccessor() {
if (accessor == null) {
new ProxyResource();
}
} | class ProxyResourceAccessHelper {
private static ProxyResourceAccessor accessor;
/**
* Type defining the methods to set the non-public properties of a {@link ProxyResource} instance.
*/
public interface ProxyResourceAccessor {
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
void setId(ProxyResource proxyResource, String id);
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
void setName(ProxyResource proxyResource, String name);
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
void setType(ProxyResource proxyResource, String type);
}
/**
* The method called from {@link ProxyResource} to set it's accessor.
*
* @param proxyResourceAccessor the accessor.
*/
public static void setAccessor(final ProxyResourceAccessor proxyResourceAccessor) {
accessor = proxyResourceAccessor;
}
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
public static void setId(ProxyResource proxyResource, String id) {
ensureAccessor();
accessor.setId(proxyResource, id);
}
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
public static void setName(ProxyResource proxyResource, String name) {
ensureAccessor();
accessor.setName(proxyResource, name);
}
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
public static void setType(ProxyResource proxyResource, String type) {
ensureAccessor();
accessor.setType(proxyResource, type);
}
private ProxyResourceAccessHelper() {
}
} | class ProxyResourceAccessHelper {
private static ProxyResourceAccessor accessor;
/**
* Type defining the methods to set the non-public properties of a {@link ProxyResource} instance.
*/
public interface ProxyResourceAccessor {
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
void setId(ProxyResource proxyResource, String id);
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
void setName(ProxyResource proxyResource, String name);
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
void setType(ProxyResource proxyResource, String type);
}
/**
* The method called from {@link ProxyResource} to set it's accessor.
*
* @param proxyResourceAccessor the accessor.
*/
public static void setAccessor(final ProxyResourceAccessor proxyResourceAccessor) {
accessor = proxyResourceAccessor;
}
/**
* Sets the {@code id} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code id} property is to be set.
* @param id the id value.
*/
public static void setId(ProxyResource proxyResource, String id) {
ensureAccessor();
accessor.setId(proxyResource, id);
}
/**
* Sets the {@code name} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code name} property is to be set.
* @param name the name value.
*/
public static void setName(ProxyResource proxyResource, String name) {
ensureAccessor();
accessor.setName(proxyResource, name);
}
/**
* Sets the {@code type} property of the {@code proxyResource} instance.
*
* @param proxyResource the proxy resource instance whose {@code type} property is to be set.
* @param type the type value.
*/
public static void setType(ProxyResource proxyResource, String type) {
ensureAccessor();
accessor.setType(proxyResource, type);
}
private ProxyResourceAccessHelper() {
}
} |
Should add to the Javadocs that this will fail is local cryptography is being used. And do we have a way for inspecting whether local cryptography is being used? If not, we probably should. | public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
return monoError(LOGGER, e);
}
} else {
return FluxUtil.monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode"));
}
} | new UnsupportedOperationException("Operation not supported when operating in local-only mode")); | return monoError(LOGGER, e);
}
} else {
return monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final JsonWebKey jsonWebKey;
private final HttpPipeline pipeline;
private final LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);

    // Try to fetch the key material and set up a local cryptography client. Any failure here is
    // non-fatal: operations simply fall back to the service-side implementation.
    LocalKeyCryptographyClient local = null;
    try {
        local = retrieveJwkAndInitializeLocalClient(this.implClient);
    } catch (RuntimeException e) {
        LOGGER.info(
            "Cannot perform cryptographic operations locally. Defaulting to service-side cryptography.", e);
    }

    this.localKeyCryptographyClient = local;
    this.jsonWebKey = (local != null) ? local.getJsonWebKey() : null;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
// Local-only mode: all cryptographic operations run against the supplied key material.
// The validation order below determines which error message the caller sees first.
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
if (!jsonWebKey.isValid()) {
throw new IllegalArgumentException("The JSON Web Key is not valid.");
}
if (jsonWebKey.getKeyOps() == null) {
throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
}
if (jsonWebKey.getKeyType() == null) {
throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
}
// No service-backed client or pipeline exists in this mode.
this.implClient = null;
this.jsonWebKey = jsonWebKey;
this.keyId = jsonWebKey.getId();
this.pipeline = null;
try {
this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
} catch (RuntimeException e) {
// Unlike the service-backed constructor, failure here is fatal: there is no remote fallback.
throw LOGGER.logExceptionAsError(
new RuntimeException("Could not initialize local cryptography client.", e));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
// May be null when the client was created from a JsonWebKey (local-only mode).
return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
// Delegate to the Response-returning variant and unwrap just the key value.
return getKeyWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.encryptAsync(algorithm, plaintext, context);
            }
            // Local path: the key must explicitly permit the encrypt operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
            }
            String message = String.format(
                "The encrypt operation is missing permission/not supported for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.encryptAsync(encryptParameters, context);
            }
            // Local path: the key must explicitly permit the encrypt operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.ENCRYPT)) {
                return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
            }
            String message = String.format(
                "The encrypt operation is missing permission/not supported for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.decryptAsync(algorithm, ciphertext, context);
            }
            // Local path: the key must explicitly permit the decrypt operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
            }
            String message = String.format(
                "The decrypt operation is not allowed for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.decryptAsync(decryptParameters, context);
            }
            // Local path: the key must explicitly permit the decrypt operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.DECRYPT)) {
                return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
            }
            String message = String.format(
                "The decrypt operation is not allowed for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.signAsync(algorithm, digest, context);
            }
            // Local path: the key must explicitly permit the sign operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signAsync(algorithm, digest, context);
            }
            String message = String.format(
                "The sign operation is not allowed for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.verifyAsync(algorithm, digest, signature, context);
            }
            // Local path: the key must explicitly permit the verify operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
            }
            String message = String.format(
                "The verify operation is not allowed for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    // Wraps the given key material with the configured key: uses the local cryptography client when
    // available (and the key permits the operation), otherwise delegates to the Key Vault service.
    try {
        return withContext(context -> {
            if (localKeyCryptographyClient != null) {
                if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.WRAP_KEY)) {
                    // Message casing fixed ("wrap key", was "wrap Key") for consistency with the
                    // sibling operations, e.g. "The unwrap key operation is not allowed...".
                    return Mono.error(LOGGER.logExceptionAsError(
                        new UnsupportedOperationException(String.format(
                            "The wrap key operation is not allowed for key with id: %s",
                            this.jsonWebKey.getId()))));
                }
                return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
            }
            return implClient.wrapKeyAsync(algorithm, key, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
            }
            // Local path: the key must explicitly permit the unwrap operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
            }
            String message = String.format(
                "The unwrap key operation is not allowed for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.signDataAsync(algorithm, data, context);
            }
            // Local path: the key must explicitly permit the sign operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.SIGN)) {
                return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
            }
            String message = String.format(
                "The sign operation is not allowed for key with id: %s",
                this.jsonWebKey.getId());
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(context -> {
            // No local cryptography client: delegate straight to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.verifyDataAsync(algorithm, data, signature, context);
            }
            // Local path: the key must explicitly permit the verify operation.
            if (checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.VERIFY)) {
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
            }
            String message = "The verify operation is not allowed for key with id: " + this.jsonWebKey.getId();
            return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final HttpPipeline pipeline;
private volatile boolean attemptedToInitializeLocalClient = false;
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
// Service-backed mode. Unlike the older variant, no local cryptography client is built here;
// the attemptedToInitializeLocalClient/localKeyCryptographyClient fields suggest it is set up
// lazily on first use — the initialization site is outside this chunk, so confirm.
this.keyId = keyId;
this.pipeline = pipeline;
this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
// Local-only mode: all cryptographic operations run against the supplied key material.
// The validation order below determines which error message the caller sees first.
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
if (!jsonWebKey.isValid()) {
throw new IllegalArgumentException("The JSON Web Key is not valid.");
}
if (jsonWebKey.getKeyOps() == null) {
throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
}
if (jsonWebKey.getKeyType() == null) {
throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
}
// No service-backed client or pipeline exists in this mode.
this.implClient = null;
this.keyId = jsonWebKey.getId();
this.pipeline = null;
try {
this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
// Mark initialization as done so later operations don't re-attempt it.
this.attemptedToInitializeLocalClient = true;
} catch (RuntimeException e) {
// Failure here is fatal: in local-only mode there is no service fallback.
throw LOGGER.logExceptionAsError(
new RuntimeException("Could not initialize local cryptography client.", e));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
// May be null when the client was created from a JsonWebKey (local-only mode).
return this.pipeline;
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types and
 * requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the with-response variant and unwrap the REST response envelope.
    Mono<Response<KeyVaultKey>> responseMono = getKeyWithResponse();

    return responseMono.flatMap(FluxUtil::toMono);
}
/**
 * Gets the public part of the configured key along with the full HTTP response. The get key operation is
 * applicable to all key types and requires the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing a {@link Response} whose value is the requested
 * {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 * @throws UnsupportedOperationException When operating in local-only mode (using a client created from a
 * JsonWebKey instance).
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // implClient is null when this client was created from a JsonWebKey (local-only mode).
    if (implClient != null) {
        try {
            return implClient.getKeyAsync();
        } catch (RuntimeException e) {
            // NOTE(review): this method body appears truncated in this view (empty catch block,
            // unterminated branches) -- verify against the original file before relying on it.
        }
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Only a single block of data is
 * supported; its maximum size depends on the target key and the chosen algorithm. The operation is
 * supported for both symmetric and asymmetric keys (for asymmetric keys the public portion is used).
 * Requires the {@code keys/encrypt} permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing the {@link EncryptResult}.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context)
                : implClient.encryptAsync(algorithm, plaintext, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the parameters supplied, such as
 * an explicit initialization vector for CBC-style algorithms. Only a single block of data is supported;
 * its maximum size depends on the target key and the chosen algorithm. Requires the
 * {@code keys/encrypt} permission for non-local operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing the {@link EncryptResult}.
 *
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.encryptAsync(encryptParameters, context)
                : implClient.encryptAsync(encryptParameters, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. The block
 * size depends on the target key and algorithm. Supported for both symmetric and asymmetric keys.
 * Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param algorithm The algorithm to be used for decryption.
 * @param ciphertext The content to be decrypted. Microsoft recommends not using CBC without first
 * ensuring the integrity of the ciphertext (e.g. with an HMAC).
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context)
                : implClient.decryptAsync(algorithm, ciphertext, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Decrypts a single block of encrypted data using the configured key and the parameters supplied, such
 * as an explicit initialization vector for CBC-style algorithms. Supported for both symmetric and
 * asymmetric keys. Requires the {@code keys/decrypt} permission for non-local operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends not
 * using CBC without first ensuring the integrity of the ciphertext (e.g. with an HMAC).
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.decryptAsync(decryptParameters, context)
                : implClient.decryptAsync(decryptParameters, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both
 * asymmetric and symmetric keys. Requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param digest The content from which the signature is to be created.
 *
 * @return A {@link Mono} containing the {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.signAsync(algorithm, digest, context)
                : implClient.signAsync(algorithm, digest, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Verifies a signature against a digest using the configured key. The verify operation supports both
 * symmetric and asymmetric keys; for asymmetric keys the public portion of the key is used. Requires
 * the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm the signature was created with.
 * @param digest The content from which the signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing the {@link VerifyResult}.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context)
                : implClient.verifyAsync(algorithm, digest, signature, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Wraps a symmetric key using the configured key. Wrapping is supported with both symmetric and
 * asymmetric keys. Requires the {@code keys/wrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing the {@link WrapResult} with the wrapped key.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for the wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context)
                : implClient.wrapKeyAsync(algorithm, key, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Unwraps a symmetric key using the configured key that was originally used to wrap it; the reverse of
 * the wrap operation. Supported for both symmetric and asymmetric keys. Requires the
 * {@code keys/unwrapKey} permission for non-local operations.
 *
 * @param algorithm The encryption algorithm that was used to wrap the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing the {@link UnwrapResult} with the unwrapped key.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for the unwrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context)
                : implClient.unwrapKeyAsync(algorithm, encryptedKey, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Creates a signature from raw data using the configured key (the data is digested as part of the
 * operation). Supports both asymmetric and symmetric keys. Requires the {@code keys/sign} permission
 * for non-local operations.
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which the signature is to be created.
 *
 * @return A {@link Mono} containing the {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.signDataAsync(algorithm, data, context)
                : implClient.signDataAsync(algorithm, data, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Verifies a signature against raw data using the configured key (the data is digested as part of the
 * operation). Supports both symmetric and asymmetric keys; for asymmetric keys the public portion of
 * the key is used. Requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The algorithm the signature was created with.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing the {@link VerifyResult}.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Prefer local cryptography when available; otherwise fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context)
                : implClient.verifyDataAsync(algorithm, data, signature, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
 * Determines whether a local cryptography client is available, lazily attempting to initialize one from
 * the service-side key on the first call.
 *
 * <p>Retryable initialization failures leave {@code attemptedToInitializeLocalClient} unset so a later
 * call can try again; non-retryable failures mark initialization as attempted, permanently falling back
 * to service-side cryptography.</p>
 *
 * <p>NOTE(review): the flag and client fields are mutated from reactive callbacks without
 * synchronization -- presumably benign (worst case is a redundant retry), but confirm thread-safety
 * expectations.</p>
 *
 * @return A {@link Mono} emitting {@code true} if local cryptography can be used, {@code false} otherwise.
 */
private Mono<Boolean> isLocalClientAvailable() {
    if (!attemptedToInitializeLocalClient) {
        return retrieveJwkAndInitializeLocalAsyncClient()
            .map(localClient -> {
                localKeyCryptographyClient = localClient;
                attemptedToInitializeLocalClient = true;
                return true;
            })
            .onErrorResume(t -> {
                if (isThrowableRetryable(t)) {
                    LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
                        + "Defaulting to service-side cryptography.", t);
                } else {
                    attemptedToInitializeLocalClient = true;
                    // Fixed log message: the original concatenation was missing a space and produced
                    // "Defaulting toservice-side cryptography...".
                    LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to "
                        + "service-side cryptography for all operations.", t);
                }

                return Mono.just(false);
            });
    }

    return Mono.just(localKeyCryptographyClient != null);
}
/**
 * Retrieves the JSON Web Key backing this client from the service and initializes a local cryptography
 * client from it.
 *
 * <p>Keys managed through the secrets collection are fetched via the secrets API; all other keys are
 * fetched via the keys API.</p>
 *
 * @return A {@link Mono} emitting the initialized {@link LocalKeyCryptographyClient}, or erroring with
 * {@link IllegalStateException} when the key collection is null/empty or the retrieved key is invalid.
 */
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
    if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
        // Keys stored in the secrets collection expose their material through the secrets API.
        Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
            ? implClient.getSecretKeyAsync()
            : implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());

        return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
            if (!jsonWebKey.isValid()) {
                sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
            } else {
                sink.next(initializeLocalClient(jsonWebKey, implClient));
            }
        });
    } else {
        return Mono.error(new IllegalStateException(
            "Could not create a local cryptography client. Key collection is null or empty."));
    }
}
} |
We don't modify the request, it looks safe to remove this line. | public HttpResponse send(HttpRequest httpRequest) {
originalRequest = httpRequest;
listener = httpRequest.getListener();
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection, httpRequest.getMetadata().isEagerlyReadResponse());
} | originalRequest = httpRequest; | public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private EventStreamListener listener;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]+$");
protected long lastEventID = -1;
protected long retryAfter;
private HttpRequest originalRequest;
private static final String TEXT_EVENT_STREAM = "text/event-stream";
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
/**
 * Synchronously sends a PATCH request via a raw socket client, working around
 * {@link HttpURLConnection}'s lack of support for the PATCH method.
 *
 * @param httpRequest The HTTP request being sent.
 * @return The {@link HttpResponse} object.
 * @throws UncheckedIOException If the socket I/O fails.
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Opens an {@link HttpURLConnection} for the request URL, applying proxy settings, timeouts, the HTTP
 * method, and request headers.
 *
 * <p>If a proxy with credentials is configured, 'Basic' proxy authorization is applied.</p>
 *
 * @param httpRequest The HTTP request being sent.
 * @return The configured {@link HttpURLConnection}.
 * @throws RuntimeException Wrapping any {@link IOException} or {@link ProtocolException} raised while
 * opening or configuring the connection.
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();

        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();

            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);

                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode credentials with an explicit UTF-8 charset. The previous no-arg
                    // getBytes() used the platform-default charset, making non-ASCII credentials
                    // encode differently across platforms.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(java.nio.charset.StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw new ConnectException("Invalid proxy address");
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }

        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }

        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }

        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }

        // A header may carry multiple values; add each one individually.
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }

        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    }
}
/**
 * Writes the request body, if any, to the connection's output stream.
 *
 * <p>GET and HEAD requests never send a body; all other recognized methods stream the body through a
 * {@link DataOutputStream}.</p>
 *
 * @param httpRequest The HTTP request being sent.
 * @param connection The connection being written to.
 * @throws RuntimeException Wrapping any {@link IOException} raised while writing the body, or an
 * {@link IllegalStateException} for an unrecognized HTTP method.
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData requestBody = httpRequest.getBody();

    if (requestBody == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();

    // GET and HEAD requests do not carry a body.
    if (method == HttpMethod.GET || method == HttpMethod.HEAD) {
        return;
    }

    switch (method) {
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);

            try (DataOutputStream bodyStream = new DataOutputStream(connection.getOutputStream())) {
                requestBody.writeTo(bodyStream);
                bodyStream.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new RuntimeException(e));
            }

            return;

        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receives the response from the remote server.
 *
 * @param httpRequest The HTTP request that was sent.
 * @param connection The connection the request was sent on.
 * @param eagerlyReadResponse Whether the response body should be fully read into memory before returning.
 * @return The {@link HttpResponse} object.
 * @throws RuntimeException Wrapping any {@link IOException} raised while reading the response.
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection,
    boolean eagerlyReadResponse) {
    try {
        int responseCode = connection.getResponseCode();
        Map<String, List<String>> hucHeaders = connection.getHeaderFields();
        // Pre-size for HashMap's default 0.75 load factor to avoid rehashing.
        Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));

        // Reuse the already-fetched header map instead of calling getHeaderFields() again.
        for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
            // The status line is reported under a null key; skip it.
            if (entry.getKey() != null) {
                responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
            }
        }

        if (!eagerlyReadResponse) {
            // Guard against responses without a Content-Type header; the previous implementation
            // dereferenced the header unconditionally and threw a NullPointerException.
            Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);

            if (contentType != null && contentType.getValue().contains(TEXT_EVENT_STREAM) && listener != null) {
                // try-with-resources closes the reader (and underlying stream) once event
                // processing finishes; the previous implementation leaked it.
                try (BufferedReader reader
                    = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
                    processBuffer(reader);
                } catch (IOException e) {
                    retryExceptionForSSE(e);
                }
            }

            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();

            // Prefer the error stream when present (4xx/5xx); otherwise read the regular body.
            try (InputStream errorStream = connection.getErrorStream();
                InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
                byte[] buffer = new byte[8192];
                int length;

                while ((length = inputStream.read(buffer)) != -1) {
                    outputStream.write(buffer, 0, length);
                }
            }

            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Processes the SSE character stream and dispatches each complete event block to the listener.
 * An event block is terminated by a blank line ("\n\n"); each block is split into lines and
 * parsed per the server-sent events field rules (event, data, id, retry, comments).
 *
 * @param reader The BufferedReader object wrapping the response stream
 * @throws IOException If an I/O error occurs while reading
 */
private void processBuffer(BufferedReader reader) throws IOException {
    StringBuilder sb = new StringBuilder(), data = new StringBuilder();
    String eventStr = null;
    long id = -1;
    ServerSentEvent event = new ServerSentEvent();
    int dataRead = reader.read();
    while (dataRead != -1) {
        sb.append((char) dataRead);
        dataRead = reader.read();
        int index;
        // A blank line ("\n\n") marks the end of an event block.
        while ((index = sb.indexOf("\n\n")) >= 0) {
            String[] lines = sb.substring(0, index).split("\n");
            sb.delete(0, index + 2);
            boolean hasDataOrEvent = false;
            for (String line : lines) {
                int idx = line.indexOf(':');
                if (idx < 0) {
                    continue;
                } else if (idx == 0) {
                    // Lines starting with ':' are comments. Record the comment and move to the
                    // next line; previously this fell through and re-parsed the comment line
                    // as an (empty-named) field.
                    event = event.setComment(line.substring(1).trim());
                    continue;
                }
                String field = line.substring(0, idx), value = line.substring(idx + 1).trim();
                switch (field.trim().toLowerCase()) {
                    case "event":
                        eventStr = value;
                        hasDataOrEvent = true;
                        break;
                    case "data":
                        if (data.length() > 0) {
                            data.append("\n");
                        }
                        data.append(value);
                        hasDataOrEvent = true;
                        break;
                    case "id":
                        // Ignore empty/non-numeric ids instead of letting Long.parseLong throw
                        // NumberFormatException and abort the whole stream.
                        if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                            id = Long.parseLong(value);
                            lastEventID = id;
                            event = event.setId(id);
                        }
                        break;
                    case "retry":
                        if (DIGITS_ONLY.matcher(value).matches()) {
                            retryAfter = Long.parseLong(value);
                        }
                        event = event.setRetryAfter(retryAfter);
                        break;
                    default:
                        // Unknown fields are ignored per the SSE specification.
                        break;
                }
            }
            if (hasDataOrEvent) {
                dispatchEvent(data, eventStr, id, event);
            }
            data.setLength(0);
        }
    }
    listener.onClose(event);
}
/**
 * Populates the event with the parsed fields and hands it to the listener.
 *
 * @param data the accumulated "data" field of the server sent event
 * @param eventStr the "event" field of the server sent event, or null for the default event
 * @param id the "id" field of the server sent event, or -1 when no id was seen
 * @param event the ServerSentEvent being built up
 */
private void dispatchEvent(StringBuilder data, String eventStr, long id, ServerSentEvent event) {
    // Fall back to the default event name when the block did not name one.
    String resolvedEventName = (eventStr == null) ? DEFAULT_EVENT : eventStr;
    ServerSentEvent dispatched = event.setEvent(resolvedEventName);
    if (id != -1) {
        dispatched = dispatched.setId(id);
    }
    dispatched = dispatched.setData(data.toString());
    listener.onEvent(dispatched);
}
/**
 * Retries the request if the listener allows it; otherwise reports the error to the listener.
 *
 * @param e The IOException that occurred while reading the event stream
 */
private void retryExceptionForSSE(IOException e) {
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(e, retryAfter, lastEventID)) {
        // NOTE(review): this is an alias of the original request, not a deep copy, so the
        // Last-Event-Id header is added to the shared request instance — confirm intended.
        HttpRequest copyOriginalRequest = originalRequest;
        if (lastEventID != -1) {
            copyOriginalRequest.getHeaders().add(HeaderName.fromString("Last-Event-Id"),
                String.valueOf(lastEventID));
        }
        try {
            Thread.sleep(retryAfter);
        } catch (InterruptedException interruption) {
            // Restore the interrupt flag so code further up the stack can observe the
            // interruption; previously it was silently swallowed.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(copyOriginalRequest);
        }
    } else {
        listener.onError(e);
    }
}
// Minimal hand-rolled HTTP client used only for PATCH requests, which HttpURLConnection
// does not accept as a request method.
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    // Default JVM SSL socket factory; shared across all PATCH requests.
    private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault();
    /**
     * Opens a socket connection, then writes the PATCH request across the
     * connection and reads the response.
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     * @throws ProtocolException If the URL protocol is neither HTTP nor HTTPS
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // NOTE(review): URL.getPort() returns -1 when the URL has no explicit port — presumably
        // callers always supply one; verify, otherwise new Socket(host, -1) will fail.
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https": {
                try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            }
            case "http": {
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            }
        }
        // Reached only for protocols other than http/https (both cases above return).
        throw new ProtocolException("Only HTTP and HTTPS are supported by this client.");
    }
    /**
     * Calls buildAndSend to send a String representation of the request across the output
     * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
     * from the input stream. Redirects (a Location response header) are followed by
     * re-issuing the PATCH recursively against the new URL.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
        // The Host header is mandatory for HTTP/1.1; connections are closed after each
        // request unless the caller explicitly asked for keep-alive.
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (
            BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            // NOTE(review): the writer uses the platform default charset while the reader is
            // UTF-8 — confirm request bodies are ASCII-safe or align the charsets.
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    // Absolute redirect target.
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                // NOTE(review): no redirect-depth limit — a redirect loop recurses until
                // StackOverflowError; confirm acceptable for this internal client.
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }
    /**
     * Converts an instance of HttpRequest to a String representation for sending
     * over the output stream. Writes the request line, then all headers, then the
     * body (if any) separated by a blank line.
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH")
            .append(" ")
            .append(httpRequest.getUrl().getPath())
            .append(HTTP_VERSION)
            .append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                // Multi-valued headers are written as one line per value.
                header.getValuesList().forEach(value -> request.append(header.getName())
                    .append(": ")
                    .append(value)
                    .append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            // NOTE(review): no Content-Length header is written — relies on Connection: close
            // framing; confirm servers accept this.
            request.append("\r\n")
                .append(httpRequest.getBody().toString())
                .append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }
    /**
     * Reads the response from the input stream and extracts the information
     * needed to construct an instance of HttpUrlConnectionResponse.
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException {
        // NOTE(review): statusLine is null if the server closes without responding —
        // the indexOf call below would then throw NullPointerException.
        String statusLine = reader.readLine();
        // Parses the status code by position relative to the '.' in "HTTP/1.1",
        // e.g. "HTTP/1.1 200 OK" -> characters dotIndex+3..dotIndex+6 -> "200".
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6));
        Headers headers = new Headers();
        String line;
        // Headers end at the first empty line.
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            String[] kv = line.split(": ", 2);
            String k = kv[0];
            String v = kv[1];
            headers.add(HeaderName.fromString(k), v);
        }
        // Remaining lines form the body; note that line terminators are dropped by readLine
        // and not restored, so multi-line bodies lose their newlines.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // NOTE(review): getBytes() uses the platform default charset while the reader decoded
        // UTF-8 — confirm, or use getBytes(StandardCharsets.UTF_8).
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
// Timeouts are stored in milliseconds; -1 signals "not configured" (see connect()).
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    this.connectionTimeout = (connectionTimeout != null) ? connectionTimeout.toMillis() : -1;
    this.readTimeout = (readTimeout != null) ? readTimeout.toMillis() : -1;
    this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
/**
 * Synchronously sends a PATCH request via a socket client, since HttpURLConnection
 * does not support the PATCH method.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException ioException) {
        // Surface socket failures as unchecked exceptions, logged at warning level.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(ioException));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode the credentials with an explicit charset; the previous call to
                    // getBytes() used the platform default, which corrupts non-ASCII
                    // credentials on non-UTF-8 platforms.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means the timeout was not configured; leave the JDK default in place.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            // HttpURLConnection rejects unsupported verbs (e.g. PATCH); those are routed
            // elsewhere before reaching this point.
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 * GET and HEAD requests never carry a body; all other known methods stream the body
 * to the connection's output stream.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    // Capture the method once and switch on the local instead of invoking the
    // getter a second time.
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            // Bodies are ignored for GET/HEAD.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Successful text/event-stream responses are handled as server-sent events;
        // everything else (including error responses) is buffered into memory.
        boolean isEventStream = connection.getErrorStream() == null && isTextEventStream(responseHeaders);
        if (!isEventStream) {
            AccessibleByteArrayOutputStream bodyStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(bodyStream.toByteBuffer()));
        }
        if (listener != null) {
            processTextEventStream(httpRequest, connection, listener);
        } else {
            LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                " http request. Treating response as regular response.");
        }
        return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Reads the connection's input stream as a server-sent event stream, dispatching events to
 * the listener and retrying the request if the stream fails and the listener allows it.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection the event stream is read from
 * @param listener The listener attached to the request
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Use the StandardCharsets constant instead of the charset name: it avoids the checked
    // UnsupportedEncodingException path and matches the rest of this file.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
// Returns true only when the response carries a Content-Type header whose value is
// exactly text/event-stream.
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentTypeHeader = responseHeaders.get(HeaderName.CONTENT_TYPE);
    if (contentTypeHeader == null) {
        return false;
    }
    return contentTypeHeader.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Processes the sse buffer and dispatches the event. Lines are accumulated until a blank
 * line terminates the event block; each complete block is parsed via processLines and,
 * when it contains a named event or data, handed to the listener.
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return a RetrySSEResult describing where the stream failed, or null on clean completion
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    // Tracks the most recently parsed event so a failure can report its id/retry hints.
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            collectedData.append(line).append("\n");
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                // Blocks carrying only comments/id/retry (default event name, no data)
                // are not dispatched.
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                // Start a fresh buffer for the next event block.
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Capture the last event's id and retry-after so the caller can decide on a retry.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
    }
    return null;
}
// An SSE event block ends at a blank line, i.e. two consecutive newlines.
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") != -1;
}
/**
 * Parses the lines of one SSE event block into a ServerSentEvent, following the
 * server-sent events field rules (comment, event, data, id, retry).
 *
 * @param lines the lines of a single event block (already split on '\n')
 * @return the parsed ServerSentEvent
 * @throws IllegalArgumentException if a line contains an unrecognized field name
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // Lines starting with ':' are comments.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // A line with no colon is a field name with an empty value. The previous code used
        // lines.length (the number of lines in the block) as the substring end instead of
        // line.length(), which truncated the field name or threw
        // StringIndexOutOfBoundsException.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                // NOTE(review): the SSE specification says unknown fields should be ignored;
                // this throws instead. Existing behavior preserved — confirm intent.
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it; otherwise reports the error to the listener.
 *
 * @param retrySSEResult the result of the failed stream (exception, last event id, retry-after)
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    // Let the server resume the stream from where it broke off.
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // Restore the interrupt flag so callers can observe the interruption; previously
        // it was silently swallowed.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
// Converts HttpURLConnection's raw header map into a Headers collection, skipping the
// status line (which the JDK reports under a null key).
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(rawHeaders.size());
    rawHeaders.forEach((name, values) -> {
        if (name != null) {
            responseHeaders.add(HeaderName.fromString(name), values);
        }
    });
    return responseHeaders;
}
// Drains the connection's body into memory; the error stream takes precedence over the
// regular input stream when present.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyStream = new AccessibleByteArrayOutputStream();
    InputStream errorStream = connection.getErrorStream();
    try (InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            bodyStream.write(chunk, 0, read);
        }
    }
    return bodyStream;
}
/**
 * Inner class to hold the result for a retry of an SSE request: the exception that broke
 * the stream, the id of the last event received (-1 if none), and the server-provided
 * retry-after delay (null if none).
 */
private static class RetrySSEResult {
    // Id of the last successfully received event, or -1 when no event carried an id.
    private final long lastEventId;
    // Server-suggested delay before reconnecting; null when the server gave none.
    private final Duration retryAfter;
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public Duration getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
// Minimal socket-based HTTP client used only for PATCH requests, which HttpURLConnection
// rejects as a request method.
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }
    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream. Redirects (a
     * Location response header) are followed by re-issuing the PATCH against the new URL.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        // Host is mandatory for HTTP/1.1; close the connection after the exchange unless the
        // caller explicitly requested keep-alive.
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            // NOTE(review): the writer uses the platform default charset while the reader is
            // UTF-8 — confirm request bodies are ASCII-safe or align the charsets.
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }
    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream:
     * request line, then headers, then (optionally) a blank line and the body.
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                // Multi-valued headers are written as one line per value.
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }
    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs or the server closes the connection without responding
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        // Fail with a descriptive exception instead of a NullPointerException when the
        // server closes the connection without sending a response.
        if (statusLine == null) {
            throw new ProtocolException("Empty response from server.");
        }
        // Parse the status code by position relative to the '.' in "HTTP/1.1",
        // e.g. "HTTP/1.1 200 OK" -> "200".
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        // Headers end at the first empty line.
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        // Remaining lines form the body. Note readLine drops line terminators, so multi-line
        // bodies lose their newlines.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Re-encode with the same charset the reader decoded with; the previous getBytes()
        // call used the platform default charset and corrupted non-ASCII bodies on
        // non-UTF-8 platforms.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
This code is repeated in multiple places for different operations. We could extract this into a method to reduce duplication. | public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
try {
return withContext(context -> {
if (localKeyCryptographyClient != null) {
if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.SIGN)) {
return Mono.error(LOGGER.logExceptionAsError(
new UnsupportedOperationException(String.format(
"The sign operation is not allowed for key with id: %s",
this.jsonWebKey.getId()))));
}
return localKeyCryptographyClient.signAsync(algorithm, digest, context);
}
return implClient.signAsync(algorithm, digest, context);
});
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | } | public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.signAsync(algorithm, digest, context);
} else {
return implClient.signAsync(algorithm, digest, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final JsonWebKey jsonWebKey;
private final HttpPipeline pipeline;
private final LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
 * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    LocalKeyCryptographyClient resolvedLocalClient;
    try {
        resolvedLocalClient = retrieveJwkAndInitializeLocalClient(this.implClient);
    } catch (RuntimeException e) {
        // Falling back to service-side cryptography is expected when the key material
        // cannot be retrieved; log and continue without a local client.
        LOGGER.info(
            "Cannot perform cryptographic operations locally. Defaulting to service-side cryptography.", e);
        resolvedLocalClient = null;
    }
    this.localKeyCryptographyClient = resolvedLocalClient;
    this.jsonWebKey = (resolvedLocalClient == null) ? null : resolvedLocalClient.getJsonWebKey();
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If the key is invalid or is missing its key operations or key type.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Route validation failures through the ClientLogger for consistency with the rest of
    // this class, which logs every exception it throws.
    if (!jsonWebKey.isValid()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("The JSON Web Key is not valid."));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key operations property is not configured."));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The JSON Web Key's key type property is not configured."));
    }
    // Local-only mode: there is no service client or pipeline available.
    this.implClient = null;
    this.jsonWebKey = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    try {
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Could not initialize local cryptography client.", e));
    }
}
/**
 * Gets the {@link HttpPipeline} powering this client, or {@code null} in local-only mode.
 *
 * @return The pipeline.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the Response-returning overload and unwrap its value.
    return getKeyWithResponse().flatMap(response -> FluxUtil.toMono(response));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // Key retrieval requires a service-backed client; local-only clients cannot fetch keys.
    if (implClient == null) {
        return FluxUtil.monoError(LOGGER,
            new UnsupportedOperationException("Operation not supported when operating in local-only mode"));
    }
    try {
        return implClient.getKeyAsync();
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        return withContext(context -> {
            // Without a local client, always defer to the service.
            if (localKeyCryptographyClient == null) {
                return implClient.encryptAsync(algorithm, plaintext, context);
            }
            // Local path: the key must allow the encrypt operation.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.ENCRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format(
                        "The encrypt operation is missing permission/not supported for key with id: %s",
                        this.jsonWebKey.getId()))));
            }
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(context -> {
            // Remote path when this client was not built from local key material.
            if (localKeyCryptographyClient == null) {
                return implClient.encryptAsync(encryptParameters, context);
            }
            // Local path: enforce the key's declared operations before encrypting.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.ENCRYPT)) {
                String message = String.format(
                    "The encrypt operation is missing permission/not supported for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
        });
    } catch (RuntimeException ex) {
        // Route synchronous failures through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> {
            // Without a local client, the operation is performed by the service.
            if (localKeyCryptographyClient == null) {
                return implClient.decryptAsync(algorithm, ciphertext, context);
            }
            // Local path: the key must list DECRYPT among its permitted operations.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.DECRYPT)) {
                String message = String.format(
                    "The decrypt operation is not allowed for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> {
            // Remote path when no local key material is configured.
            if (localKeyCryptographyClient == null) {
                return implClient.decryptAsync(decryptParameters, context);
            }
            // Local path: verify the key permits the DECRYPT operation.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.DECRYPT)) {
                String message = String.format(
                    "The decrypt operation is not allowed for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(context -> {
            // Remote path when this client has no local key material.
            if (localKeyCryptographyClient == null) {
                return implClient.verifyAsync(algorithm, digest, signature, context);
            }
            // Local path: the key must permit the VERIFY operation.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.VERIFY)) {
                String message = String.format(
                    "The verify operation is not allowed for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        return withContext(context -> {
            // Remote path when no local key material is configured.
            if (localKeyCryptographyClient == null) {
                return implClient.wrapKeyAsync(algorithm, key, context);
            }
            // Local path: the key must permit the WRAP_KEY operation.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.WRAP_KEY)) {
                String message = String.format(
                    "The wrap Key operation is not allowed for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(context -> {
            // Remote path when this client has no local key material.
            if (localKeyCryptographyClient == null) {
                return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
            }
            // Local path: the key must permit the UNWRAP_KEY operation.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                String message = String.format(
                    "The unwrap key operation is not allowed for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(context -> {
            // Remote path when no local key material is configured.
            if (localKeyCryptographyClient == null) {
                return implClient.signDataAsync(algorithm, data, context);
            }
            // Local path: the key must permit the SIGN operation.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.SIGN)) {
                String message = String.format(
                    "The sign operation is not allowed for key with id: %s",
                    this.jsonWebKey.getId());
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(message)));
            }
            return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(context -> {
            if (localKeyCryptographyClient != null) {
                // Local path: the key must permit the VERIFY operation.
                if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.VERIFY)) {
                    // Consistency: every sibling operation builds this message with
                    // String.format; the produced text is identical to the previous
                    // string concatenation.
                    return Mono.error(LOGGER.logExceptionAsError(
                        new UnsupportedOperationException(String.format(
                            "The verify operation is not allowed for key with id: %s",
                            this.jsonWebKey.getId()))));
                }
                return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
            }
            // Remote path when no local key material is configured.
            return implClient.verifyDataAsync(algorithm, data, signature, context);
        });
    } catch (RuntimeException ex) {
        // Convert synchronous failures into an error Mono.
        return monoError(LOGGER, ex);
    }
}
}

class CryptographyAsyncClient {
// Logger shared by all instances of this client.
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// HTTP pipeline used for service requests; null when operating in local-only mode.
private final HttpPipeline pipeline;
// Tracks whether lazy initialization of the local client has already been attempted.
private volatile boolean attemptedToInitializeLocalClient = false;
// Performs cryptography locally when key material is available; null otherwise.
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation client; null when constructed from a JsonWebKey.
final CryptographyClientImpl implClient;
// Identifier of the Key Vault key this client operates on.
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Service-backed mode: keep the pipeline and build the impl client from it.
    this.pipeline = pipeline;
    this.keyId = keyId;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    // Validate the key up front; each check throws with a specific message.
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }

    // Local-only mode: no pipeline and no service-backed impl client.
    this.keyId = jsonWebKey.getId();
    this.implClient = null;
    this.pipeline = null;

    try {
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
        this.attemptedToInitializeLocalClient = true;
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Could not initialize local cryptography client.", e));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // May be null when this client operates in local-only mode.
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
return getKeyWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
return monoError(LOGGER, e);
}
} else {
return monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Prefer the local client when one can be initialized; otherwise fall
        // back to the service-backed implementation.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context)
                : implClient.encryptAsync(algorithm, plaintext, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
} else {
return implClient.encryptAsync(encryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
     * See <a href="https://learn.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode">Timing
     * vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
} else {
return implClient.decryptAsync(algorithm, ciphertext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
} else {
return implClient.decryptAsync(decryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
     * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
} else {
return implClient.verifyAsync(algorithm, digest, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
} else {
return implClient.wrapKeyAsync(algorithm, key, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
} else {
return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
} else {
return implClient.signDataAsync(algorithm, data, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
} else {
return implClient.verifyDataAsync(algorithm, data, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(t -> {
if (isThrowableRetryable(t)) {
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
+ "Defaulting to service-side cryptography.", t);
} else {
attemptedToInitializeLocalClient = true;
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to"
+ "service-side cryptography for all operations.", t);
}
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
}
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
? implClient.getSecretKeyAsync()
: implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
if (!jsonWebKey.isValid()) {
sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
} else {
sink.next(initializeLocalClient(jsonWebKey, implClient));
}
});
} else {
return Mono.error(new IllegalStateException(
"Could not create a local cryptography client. Key collection is null or empty."));
}
}
} |
Agreed, good catch. We can tell when we're in local-only mode because the service client (`implClient`) is `null`. Said client only gets created when we're provided a valid Key Vault `keyId` at construction time. | public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
return monoError(LOGGER, e);
}
} else {
return FluxUtil.monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode"));
}
} | new UnsupportedOperationException("Operation not supported when operating in local-only mode")); | return monoError(LOGGER, e);
}
} else {
return monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final JsonWebKey jsonWebKey;
private final HttpPipeline pipeline;
private final LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
this.keyId = keyId;
this.pipeline = pipeline;
this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
LocalKeyCryptographyClient localClient = null;
try {
localClient = retrieveJwkAndInitializeLocalClient(this.implClient);
} catch (RuntimeException e) {
LOGGER.info(
"Cannot perform cryptographic operations locally. Defaulting to service-side cryptography.", e);
}
if (localClient != null) {
this.jsonWebKey = localClient.getJsonWebKey();
this.localKeyCryptographyClient = localClient;
} else {
this.jsonWebKey = null;
this.localKeyCryptographyClient = null;
}
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
if (!jsonWebKey.isValid()) {
throw new IllegalArgumentException("The JSON Web Key is not valid.");
}
if (jsonWebKey.getKeyOps() == null) {
throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
}
if (jsonWebKey.getKeyType() == null) {
throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
}
this.implClient = null;
this.jsonWebKey = jsonWebKey;
this.keyId = jsonWebKey.getId();
this.pipeline = null;
try {
this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(
new RuntimeException("Could not initialize local cryptography client.", e));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultKey> getKey() {
        // Delegates to getKeyWithResponse() and unwraps the Response body.
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    }
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
try {
return withContext(context -> {
if (localKeyCryptographyClient != null) {
if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.ENCRYPT)) {
return Mono.error(LOGGER.logExceptionAsError(
new UnsupportedOperationException(String.format(
"The encrypt operation is missing permission/not supported for key with id: %s",
this.jsonWebKey.getId()))));
}
return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
}
return implClient.encryptAsync(algorithm, plaintext, context);
});
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
try {
return withContext(context -> {
if (localKeyCryptographyClient != null) {
if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.ENCRYPT)) {
return Mono.error(LOGGER.logExceptionAsError(
new UnsupportedOperationException(String.format(
"The encrypt operation is missing permission/not supported for key with id: %s",
this.jsonWebKey.getId()))));
}
return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
}
return implClient.encryptAsync(encryptParameters, context);
});
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
     * See <a href="https://learn.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode">Timing
     * vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.decryptAsync(algorithm, ciphertext, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.DECRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The decrypt operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
        });
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline rather than throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Decrypts a single block of encrypted data using the configured key and the algorithm specified in the given
 * {@link DecryptParameters}. Note that only a single block of data may be decrypted; the size of this block is
 * dependent on the target key and the algorithm to be used. The decrypt operation is supported for both
 * asymmetric and symmetric keys. This operation requires the {@code keys/decrypt} permission for non-local
 * operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use
 * CBC without first ensuring the integrity of the ciphertext using an HMAC, for example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.decryptAsync(decryptParameters, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.DECRYPT)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The decrypt operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
 * symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
 *
 * @param algorithm The {@link SignatureAlgorithm algorithm} to use for signing.
 * @param digest The content from which the signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.signAsync(algorithm, digest, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.SIGN)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The sign operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.signAsync(algorithm, digest, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Verifies a signature using the configured key. The verify operation supports both symmetric and asymmetric
 * keys; for asymmetric keys, the public portion of the key is used to verify the signature. This operation
 * requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The {@link SignatureAlgorithm algorithm} used to create the signature.
 * @param digest The content from which the signature was created.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.verifyAsync(algorithm, digest, signature, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.VERIFY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The verify operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
 * symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
 * operations.
 *
 * @param algorithm The {@link KeyWrapAlgorithm encryption algorithm} to use for wrapping the key.
 * @param key The key content to be wrapped.
 *
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key.
 *
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.wrapKeyAsync(algorithm, key, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.WRAP_KEY)) {
                // Message casing fixed ("wrap key", previously "wrap Key") to match the error messages of the
                // sibling operations (decrypt/sign/verify/unwrap key).
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The wrap key operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
 * is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
 * This operation requires the {@code keys/unwrapKey} permission for non-local operations.
 *
 * @param algorithm The {@link KeyWrapAlgorithm encryption algorithm} that was used to wrap the key.
 * @param encryptedKey The encrypted key content to unwrap.
 *
 * @return A {@link Mono} containing an {@link UnwrapResult} with the unwrapped key.
 *
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The unwrap key operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Creates a signature from the raw data using the configured key. The sign data operation supports both
 * asymmetric and symmetric keys. This operation requires the {@code keys/sign} permission for non-local
 * operations.
 *
 * @param algorithm The {@link SignatureAlgorithm algorithm} to use for signing.
 * @param data The content from which the signature is to be created.
 *
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 *
 * @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for signing.
 * @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.signDataAsync(algorithm, data, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.SIGN)) {
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The sign operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports both
 * symmetric and asymmetric keys; for asymmetric keys, the public portion of the key is used to verify the
 * signature. This operation requires the {@code keys/verify} permission for non-local operations.
 *
 * @param algorithm The {@link SignatureAlgorithm algorithm} used to create the signature.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature to be verified.
 *
 * @return A {@link Mono} containing a {@link VerifyResult} indicating whether the signature is valid.
 *
 * @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for verifying.
 * @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(context -> {
            // No local client configured: perform the operation against the Key Vault service.
            if (localKeyCryptographyClient == null) {
                return implClient.verifyDataAsync(algorithm, data, signature, context);
            }

            // Local operations are only allowed when the key's "key_ops" permits them.
            if (!checkKeyPermissions(this.jsonWebKey.getKeyOps(), KeyOperation.VERIFY)) {
                // Uses String.format for consistency with the sibling operations, which all build this
                // message the same way; the resulting message text is unchanged.
                return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(
                    String.format("The verify operation is not allowed for key with id: %s",
                        this.jsonWebKey.getId()))));
            }

            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
        });
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Pipeline backing service-side operations; null when the client was built from a JsonWebKey (local-only mode).
private final HttpPipeline pipeline;
// Guards lazy initialization of the local client; volatile because it may be read/written across threads.
private volatile boolean attemptedToInitializeLocalClient = false;
// Performs cryptography locally when the key material is available; null/unset otherwise.
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation client; null in local-only mode.
final CryptographyClientImpl implClient;
final String keyId;
/**
 * Creates a {@link CryptographyAsyncClient} that uses the given {@link HttpPipeline pipeline} to service
 * requests against Azure Key Vault.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Service-backed mode: all operations go through the implementation client unless a local
    // client becomes available later.
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    this.keyId = keyId;
    this.pipeline = pipeline;
}
/**
 * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
 * operations. No HTTP pipeline or service client is created in this mode.
 *
 * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
 *
 * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException If the key is invalid or its key operations or key type are not configured.
 * @throws IllegalStateException If a local cryptography client could not be initialized for the key.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");

    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }

    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }

    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }

    // Local-only mode: no service client or HTTP pipeline is available.
    this.implClient = null;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;

    try {
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
        this.attemptedToInitializeLocalClient = true;
    } catch (RuntimeException e) {
        // IllegalStateException (rather than a raw RuntimeException) better signals that the client is
        // unusable, and remains backward compatible for callers catching RuntimeException.
        throw LOGGER.logExceptionAsError(
            new IllegalStateException("Could not initialize local cryptography client.", e));
    }
}
/**
 * Gets the {@link HttpPipeline} powering this client, or {@code null} when the client operates in local-only
 * mode (created from a {@link JsonWebKey}).
 *
 * @return The pipeline.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
/**
 * Gets the public part of the configured key. The get key operation is applicable to all key types and requires
 * the {@code keys/get} permission for non-local operations.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 *
 * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the with-response variant and unwrap its value.
    Mono<Response<KeyVaultKey>> responseMono = getKeyWithResponse();

    return responseMono.flatMap(keyResponse -> FluxUtil.toMono(keyResponse));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only
 * supports a single block of data, the size of which is dependent on the target key and the encryption
 * algorithm to be used. The encrypt operation is supported for both symmetric and asymmetric keys; for
 * asymmetric keys, the public portion of the key is used for encryption. This operation requires the
 * {@code keys/encrypt} permission for non-local operations.
 *
 * @param algorithm The {@link EncryptionAlgorithm algorithm} to be used for encryption.
 * @param plaintext The content to be encrypted.
 *
 * @return A {@link Mono} containing an {@link EncryptResult} with the encrypted content.
 *
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Prefer local cryptography when a local client could be initialized; otherwise, call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context)
                : implClient.encryptAsync(algorithm, plaintext, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive pipeline rather than throwing.
        return monoError(LOGGER, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the algorithm specified in the given
 * {@link EncryptParameters}. Note that the encrypt operation only supports a single block of data, the size of
 * which is dependent on the target key and the encryption algorithm to be used. The encrypt operation is
 * supported for both symmetric and asymmetric keys; for asymmetric keys, the public portion of the key is used
 * for encryption. This operation requires the {@code keys/encrypt} permission for non-local operations.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 *
 * @return A {@link Mono} containing an {@link EncryptResult} with the encrypted content.
 *
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Prefer local cryptography when a local client could be initialized; otherwise, call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.encryptAsync(encryptParameters, context)
                : implClient.encryptAsync(encryptParameters, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
 * single block of data may be decrypted; the size of this block is dependent on the target key and the algorithm
 * to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
 * the {@code keys/decrypt} permission for non-local operations.
 *
 * @param algorithm The {@link EncryptionAlgorithm algorithm} to be used for decryption.
 * @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
 * integrity of the ciphertext using an HMAC, for example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Prefer local cryptography when a local client could be initialized; otherwise, call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context)
                : implClient.decryptAsync(algorithm, ciphertext, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
 * Decrypts a single block of encrypted data using the configured key and the algorithm specified in the given
 * {@link DecryptParameters}. Note that only a single block of data may be decrypted; the size of this block is
 * dependent on the target key and the algorithm to be used. The decrypt operation is supported for both
 * asymmetric and symmetric keys. This operation requires the {@code keys/decrypt} permission for non-local
 * operations.
 *
 * @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use
 * CBC without first ensuring the integrity of the ciphertext using an HMAC, for example.
 *
 * @return A {@link Mono} containing the decrypted blob.
 *
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Prefer local cryptography when a local client could be initialized; otherwise, call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.decryptAsync(decryptParameters, context)
                : implClient.decryptAsync(decryptParameters, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
     * <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.signAsync(algorithm, digest, context);
} else {
return implClient.signAsync(algorithm, digest, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
} else {
return implClient.verifyAsync(algorithm, digest, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
} else {
return implClient.wrapKeyAsync(algorithm, key, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
} else {
return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
} else {
return implClient.signDataAsync(algorithm, data, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
} else {
return implClient.verifyDataAsync(algorithm, data, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(t -> {
if (isThrowableRetryable(t)) {
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
+ "Defaulting to service-side cryptography.", t);
} else {
attemptedToInitializeLocalClient = true;
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to"
+ "service-side cryptography for all operations.", t);
}
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
}
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
? implClient.getSecretKeyAsync()
: implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
if (!jsonWebKey.isValid()) {
sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
} else {
sink.next(initializeLocalClient(jsonWebKey, implClient));
}
});
} else {
return Mono.error(new IllegalStateException(
"Could not create a local cryptography client. Key collection is null or empty."));
}
}
} |
Are we going to fall back to service-side operations even if this error is transient? | private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
jsonWebKey = localKeyCryptographyClient.getJsonWebKey();
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(e -> {
LOGGER.info(
"Cannot perform cryptographic operations locally. Defaulting to service-side cryptography.", e);
attemptedToInitializeLocalClient = true;
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
} | .onErrorResume(e -> { | private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(t -> {
if (isThrowableRetryable(t)) {
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
+ "Defaulting to service-side cryptography.", t);
} else {
attemptedToInitializeLocalClient = true;
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to"
+ "service-side cryptography for all operations.", t);
}
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
} | class CryptographyAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
    // HTTP pipeline backing service calls; null when the client was built from a raw JsonWebKey.
    private final HttpPipeline pipeline;
    // Whether local-client initialization has been attempted (successfully or with a
    // non-retryable failure); guards the lazy setup path.
    private volatile boolean attemptedToInitializeLocalClient;
    // Key material used for local cryptography, when available.
    private volatile JsonWebKey jsonWebKey;
    // Performs operations locally using the cached key material; null until initialized.
    private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
    // Service-backed implementation client; null when operating in local-only mode.
    final CryptographyClientImpl implClient;
    // Identifier of the key this client targets.
    final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
this.keyId = keyId;
this.pipeline = pipeline;
this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
if (!jsonWebKey.isValid()) {
throw new IllegalArgumentException("The JSON Web Key is not valid.");
}
if (jsonWebKey.getKeyOps() == null) {
throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
}
if (jsonWebKey.getKeyType() == null) {
throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
}
this.implClient = null;
this.jsonWebKey = jsonWebKey;
this.keyId = jsonWebKey.getId();
this.pipeline = null;
try {
this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
this.attemptedToInitializeLocalClient = true;
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(
new RuntimeException("Could not initialize local cryptography client.", e));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
return getKeyWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
return monoError(LOGGER, e);
}
} else {
return monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);
return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
} else {
return implClient.encryptAsync(algorithm, plaintext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);
return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
} else {
return implClient.encryptAsync(encryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);
return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
} else {
return implClient.decryptAsync(algorithm, ciphertext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);
return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
} else {
return implClient.decryptAsync(decryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
return localKeyCryptographyClient.signAsync(algorithm, digest, context);
} else {
return implClient.signAsync(algorithm, digest, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
} else {
return implClient.verifyAsync(algorithm, digest, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.WRAP_KEY);
return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
} else {
return implClient.wrapKeyAsync(algorithm, key, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.UNWRAP_KEY);
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
} else {
return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
} else {
return implClient.signDataAsync(algorithm, data, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
} else {
return implClient.verifyDataAsync(algorithm, data, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
? implClient.getSecretKeyAsync()
: implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
return jsonWebKeyMono
.switchIfEmpty(Mono.error(new IllegalStateException(
"Could not retrieve JSON Web Key to perform local cryptographic operations.")))
.handle((jsonWebKey, sink) -> {
if (jsonWebKey.isValid()) {
sink.next(initializeLocalClient(jsonWebKey, implClient));
} else {
sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
}
});
}
return Mono.error(new IllegalStateException("Could not create a local cryptography client."));
}
} | class CryptographyAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);

    // Pipeline servicing HTTP requests; null when the client operates in local-only (JsonWebKey) mode.
    private final HttpPipeline pipeline;

    // Tracks whether local-client initialization has been attempted, so it is not retried repeatedly.
    private volatile boolean attemptedToInitializeLocalClient = false;

    // Client performing cryptographic operations with locally-held key material; may remain null.
    private volatile LocalKeyCryptographyClient localKeyCryptographyClient;

    // Service-backed implementation; null when the client operates in local-only mode.
    final CryptographyClientImpl implClient;

    // Identifier of the Azure Key Vault key this client operates on.
    final String keyId;
    /**
     * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service
     * requests.
     *
     * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
     * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
     * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
     */
    CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
        this.keyId = keyId;
        this.pipeline = pipeline;
        this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    }
    /**
     * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
     * operations only; no HTTP pipeline or service-backed client is configured.
     *
     * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
     *
     * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
     * @throws IllegalArgumentException If the key is invalid or its key operations or key type are not configured.
     * @throws RuntimeException If the local cryptography client cannot be initialized from the key.
     */
    CryptographyAsyncClient(JsonWebKey jsonWebKey) {
        Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");

        // Validate the key up front so failures surface at construction rather than on first operation.
        if (!jsonWebKey.isValid()) {
            throw new IllegalArgumentException("The JSON Web Key is not valid.");
        }

        if (jsonWebKey.getKeyOps() == null) {
            throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
        }

        if (jsonWebKey.getKeyType() == null) {
            throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
        }

        // Local-only mode: no service-backed client or pipeline.
        this.implClient = null;
        this.keyId = jsonWebKey.getId();
        this.pipeline = null;

        try {
            this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
            this.attemptedToInitializeLocalClient = true;
        } catch (RuntimeException e) {
            throw LOGGER.logExceptionAsError(
                new RuntimeException("Could not initialize local cryptography client.", e));
        }
    }
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
return this.pipeline;
}
    /**
     * Gets the public part of the configured key. The get key operation is applicable to all key types and
     * requires the {@code keys/get} permission for non-local operations.
     *
     * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
     *
     * @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
     * @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
     * JsonWebKey instance).
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<KeyVaultKey> getKey() {
        return getKeyWithResponse().flatMap(FluxUtil::toMono);
    }
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
return monoError(LOGGER, e);
}
} else {
return monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
} else {
return implClient.encryptAsync(algorithm, plaintext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
} else {
return implClient.encryptAsync(encryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
} else {
return implClient.decryptAsync(algorithm, ciphertext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
} else {
return implClient.decryptAsync(decryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Sign locally when a local client could be initialized; otherwise delegate
        // the operation to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.signAsync(algorithm, digest, context)
                : implClient.signAsync(algorithm, digest, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Verify locally when possible; otherwise fall back to the service-side operation.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context)
                : implClient.verifyAsync(algorithm, digest, signature, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Wrap with the local key material when available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context)
                : implClient.wrapKeyAsync(algorithm, key, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Unwrap locally when the key material is available; otherwise use the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context)
                : implClient.unwrapKeyAsync(algorithm, encryptedKey, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Sign the raw data locally when possible; otherwise delegate to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.signDataAsync(algorithm, data, context)
                : implClient.signDataAsync(algorithm, data, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Verify against the raw data locally when possible; otherwise use the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocal -> useLocal
                ? localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context)
                : implClient.verifyDataAsync(algorithm, data, signature, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Retrieves the JSON Web Key backing this client and, when the key is valid, builds a
// LocalKeyCryptographyClient from it so cryptographic operations can run without service calls.
// Emits an IllegalStateException if the key collection is null/empty or the retrieved key is invalid.
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
    if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
        // Keys living in the secrets collection are fetched through the secrets API; all other
        // collections expose the key material through the keys API.
        Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
            ? implClient.getSecretKeyAsync()
            : implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
        return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
            if (!jsonWebKey.isValid()) {
                // An invalid JWK cannot back local operations; surface the failure to the caller.
                sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
            } else {
                sink.next(initializeLocalClient(jsonWebKey, implClient));
            }
        });
    } else {
        return Mono.error(new IllegalStateException(
            "Could not create a local cryptography client. Key collection is null or empty."));
    }
}
} |
Good catch. I'll make it so we set the `attemptedToInitializeLocalClient` flag to true only after initializing a local client or after finding a non-retryable error (e.g. a response with a `429` status code). | private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
jsonWebKey = localKeyCryptographyClient.getJsonWebKey();
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(e -> {
LOGGER.info(
"Cannot perform cryptographic operations locally. Defaulting to service-side cryptography.", e);
attemptedToInitializeLocalClient = true;
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
} | .onErrorResume(e -> { | private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(t -> {
if (isThrowableRetryable(t)) {
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
+ "Defaulting to service-side cryptography.", t);
} else {
attemptedToInitializeLocalClient = true;
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to"
+ "service-side cryptography for all operations.", t);
}
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final HttpPipeline pipeline;
private volatile boolean attemptedToInitializeLocalClient;
private volatile JsonWebKey jsonWebKey;
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Service-backed construction: operations can be delegated to the Key Vault REST API.
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    this.pipeline = pipeline;
    this.keyId = keyId;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    // Validate the supplied key up front: it must be non-null, structurally valid, and carry
    // both a key-operations list and a key type before it can back local cryptography.
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only construction: there is no HTTP pipeline and no service-backed client.
    this.pipeline = null;
    this.implClient = null;
    this.keyId = jsonWebKey.getId();
    this.jsonWebKey = jsonWebKey;
    try {
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
        this.attemptedToInitializeLocalClient = true;
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Could not initialize local cryptography client.", e));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Exposes the pipeline this client was built with; null for local-only clients.
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Unwrap the full response variant down to just the key value.
    return getKeyWithResponse().flatMap(keyResponse -> FluxUtil.toMono(keyResponse));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // A client built directly from a JsonWebKey has no service-backed client to call.
    if (implClient == null) {
        return monoError(LOGGER,
            new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
    }
    try {
        return implClient.getKeyAsync();
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocal -> {
            if (!useLocal) {
                // No local key material; let the service perform the encryption.
                return implClient.encryptAsync(algorithm, plaintext, context);
            }
            // Local path: confirm the key permits encryption before using it.
            verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocal -> {
            if (!useLocal) {
                // No local key material; let the service perform the encryption.
                return implClient.encryptAsync(encryptParameters, context);
            }
            // Local path: confirm the key permits encryption before using it.
            verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocal -> {
            if (!useLocal) {
                // No local key material; let the service perform the decryption.
                return implClient.decryptAsync(algorithm, ciphertext, context);
            }
            // Local path: confirm the key permits decryption before using it.
            verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocal -> {
            if (!useLocal) {
                // No local key material; let the service perform the decryption.
                return implClient.decryptAsync(decryptParameters, context);
            }
            // Local path: confirm the key permits decryption before using it.
            verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);
            return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocal -> {
            if (!useLocal) {
                // No local key material; let the service produce the signature.
                return implClient.signAsync(algorithm, digest, context);
            }
            // Local path: confirm the key permits signing before using it.
            verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
            return localKeyCryptographyClient.signAsync(algorithm, digest, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
} else {
return implClient.verifyAsync(algorithm, digest, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.WRAP_KEY);
return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
} else {
return implClient.wrapKeyAsync(algorithm, key, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.UNWRAP_KEY);
return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
} else {
return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
} else {
return implClient.signDataAsync(algorithm, data, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
} else {
return implClient.verifyDataAsync(algorithm, data, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
? implClient.getSecretKeyAsync()
: implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
return jsonWebKeyMono
.switchIfEmpty(Mono.error(new IllegalStateException(
"Could not retrieve JSON Web Key to perform local cryptographic operations.")))
.handle((jsonWebKey, sink) -> {
if (jsonWebKey.isValid()) {
sink.next(initializeLocalClient(jsonWebKey, implClient));
} else {
sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
}
});
}
return Mono.error(new IllegalStateException("Could not create a local cryptography client."));
}
} | class CryptographyAsyncClient {
    private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);

    // HTTP pipeline used for service-side requests; null when the client was built from a JsonWebKey (local-only).
    private final HttpPipeline pipeline;

    // Guards one-time initialization of the local client; volatile because operations may race to initialize it.
    private volatile boolean attemptedToInitializeLocalClient = false;

    // Performs cryptographic operations locally; may stay null if local initialization was not possible.
    private volatile LocalKeyCryptographyClient localKeyCryptographyClient;

    // Service client for remote operations; null when operating in local-only mode.
    final CryptographyClientImpl implClient;

    // Identifier of the Azure Key Vault key this client operates on.
    final String keyId;
    /**
     * Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
     *
     * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
     * @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
     * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
     */
    CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
        this.keyId = keyId;
        this.pipeline = pipeline;
        // Remote operations are delegated to this implementation client.
        this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    }
    /**
     * Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
     * operations.
     *
     * @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
     *
     * @throws NullPointerException If {@code jsonWebKey} is {@code null}.
     * @throws IllegalArgumentException If the key is invalid, or its key operations or key type are not configured.
     * @throws RuntimeException If the local cryptography client cannot be initialized from the key.
     */
    CryptographyAsyncClient(JsonWebKey jsonWebKey) {
        Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
        // Reject keys that cannot support local operations before storing any state.
        if (!jsonWebKey.isValid()) {
            throw new IllegalArgumentException("The JSON Web Key is not valid.");
        }
        if (jsonWebKey.getKeyOps() == null) {
            throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
        }
        if (jsonWebKey.getKeyType() == null) {
            throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
        }
        // No service client or pipeline: this instance operates in local-only mode.
        this.implClient = null;
        this.keyId = jsonWebKey.getId();
        this.pipeline = null;
        try {
            this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
            this.attemptedToInitializeLocalClient = true;
        } catch (RuntimeException e) {
            throw LOGGER.logExceptionAsError(
                new RuntimeException("Could not initialize local cryptography client.", e));
        }
    }
    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline, or {@code null} when the client was created in local-only mode.
     */
    HttpPipeline getHttpPipeline() {
        return this.pipeline;
    }
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
return getKeyWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
if (implClient != null) {
try {
return implClient.getKeyAsync();
} catch (RuntimeException e) {
return monoError(LOGGER, e);
}
} else {
return monoError(LOGGER,
new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
} else {
return implClient.encryptAsync(algorithm, plaintext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
} else {
return implClient.encryptAsync(encryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
} else {
return implClient.decryptAsync(algorithm, ciphertext, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
} else {
return implClient.decryptAsync(decryptParameters, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.signAsync(algorithm, digest, context);
} else {
return implClient.signAsync(algorithm, digest, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
try {
return withContext(context -> isLocalClientAvailable().flatMap(available -> {
if (available) {
return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
} else {
return implClient.verifyAsync(algorithm, digest, signature, context);
}
}));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Prefer local cryptography when a local client could be initialized;
        // otherwise defer the wrap operation to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocalClient -> useLocalClient
                ? localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context)
                : implClient.wrapKeyAsync(algorithm, key, context)));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Prefer local cryptography when a local client could be initialized;
        // otherwise defer the unwrap operation to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocalClient -> useLocalClient
                ? localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context)
                : implClient.unwrapKeyAsync(algorithm, encryptedKey, context)));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Sign locally when a local client is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocalClient -> useLocalClient
                ? localKeyCryptographyClient.signDataAsync(algorithm, data, context)
                : implClient.signDataAsync(algorithm, data, context)));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Verify locally when a local client is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocalClient -> useLocalClient
                ? localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context)
                : implClient.verifyDataAsync(algorithm, data, signature, context)));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
 * Fetches the key material backing this client and builds a {@link LocalKeyCryptographyClient} around it.
 *
 * @return A {@link Mono} emitting the initialized local client, or an error if the key collection is
 * missing or the retrieved JSON Web Key is invalid.
 */
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
    // Without a key collection there is no way to fetch the key material, so fail fast.
    if (CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
        return Mono.error(new IllegalStateException(
            "Could not create a local cryptography client. Key collection is null or empty."));
    }

    // Keys living in the secrets collection are fetched through the secrets API;
    // everything else goes through the keys API.
    Mono<JsonWebKey> retrievedKey;
    if (CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())) {
        retrievedKey = implClient.getSecretKeyAsync();
    } else {
        retrievedKey = implClient.getKeyAsync().map(response -> response.getValue().getKey());
    }

    return retrievedKey.handle((webKey, sink) -> {
        if (webKey.isValid()) {
            sink.next(initializeLocalClient(webKey, implClient));
        } else {
            sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
        }
    });
}
} |
I'll also update the message to show that we're only deferring the current operation to the service, not all of them. Should we log that on INFO or DEBUG? | private Mono<Boolean> isLocalClientAvailable() {
// Lazily attempts to build a local cryptography client the first time an operation runs.
// Emits true when local cryptography can be used, false to fall back to the service.
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
// Cache the local client and its key material so later calls skip initialization.
localKeyCryptographyClient = localClient;
jsonWebKey = localKeyCryptographyClient.getJsonWebKey();
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(e -> {
// Initialization failed: log and permanently fall back to service-side cryptography.
LOGGER.info(
"Cannot perform cryptographic operations locally. Defaulting to service-side cryptography.", e);
attemptedToInitializeLocalClient = true;
return Mono.just(false);
});
}
// Subsequent calls: local cryptography is available only if a client was created earlier.
return Mono.just(localKeyCryptographyClient != null);
} | .onErrorResume(e -> { | private Mono<Boolean> isLocalClientAvailable() {
// Revised variant: only a non-retryable failure disables local cryptography for good;
// a retryable failure defers just the current operation to the service.
if (!attemptedToInitializeLocalClient) {
return retrieveJwkAndInitializeLocalAsyncClient()
.map(localClient -> {
localKeyCryptographyClient = localClient;
attemptedToInitializeLocalClient = true;
return true;
})
.onErrorResume(t -> {
if (isThrowableRetryable(t)) {
// Transient failure: the flag stays unset, so the next call retries initialization.
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
+ "Defaulting to service-side cryptography.", t);
} else {
// Permanent failure: stop retrying and use the service for all future operations.
attemptedToInitializeLocalClient = true;
// NOTE(review): this concatenation renders "Defaulting toservice-side" (missing space)
// — confirm the intended log message.
LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to"
+ "service-side cryptography for all operations.", t);
}
return Mono.just(false);
});
}
return Mono.just(localKeyCryptographyClient != null);
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final HttpPipeline pipeline;
private volatile boolean attemptedToInitializeLocalClient;
private volatile JsonWebKey jsonWebKey;
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
this.keyId = keyId;
this.pipeline = pipeline;
// Service-backed mode: all operations can reach Key Vault; a local cryptography
// client may still be set up lazily later on.
this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
// Validate up front so a client is never constructed around unusable key material.
Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
if (!jsonWebKey.isValid()) {
throw new IllegalArgumentException("The JSON Web Key is not valid.");
}
if (jsonWebKey.getKeyOps() == null) {
throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
}
if (jsonWebKey.getKeyType() == null) {
throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
}
// Local-only mode: there is no service-backed client or HTTP pipeline.
this.implClient = null;
this.jsonWebKey = jsonWebKey;
this.keyId = jsonWebKey.getId();
this.pipeline = null;
try {
// Eagerly initialize the local client; in this mode there is no service fallback.
this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
this.attemptedToInitializeLocalClient = true;
} catch (RuntimeException e) {
// Preserve the original failure as the cause so callers can diagnose it.
throw LOGGER.logExceptionAsError(
new RuntimeException("Could not initialize local cryptography client.", e));
}
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Package-private accessor for the pipeline backing this client (null in local-only mode).
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the WithResponse variant and unwrap the payload.
    return getKeyWithResponse().flatMap(response -> FluxUtil.toMono(response));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // A null implementation client means this instance was built directly from a
    // JsonWebKey, so there is no service to fetch the key from.
    if (implClient == null) {
        return monoError(LOGGER,
            new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
    }

    try {
        return implClient.getKeyAsync();
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono.
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.encryptAsync(algorithm, plaintext, context);
            }

            // Check the key's allowed operations before encrypting locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);

            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.encryptAsync(encryptParameters, context);
            }

            // Check the key's allowed operations before encrypting locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);

            return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.decryptAsync(algorithm, ciphertext, context);
            }

            // Check the key's allowed operations before decrypting locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);

            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.decryptAsync(decryptParameters, context);
            }

            // Check the key's allowed operations before decrypting locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);

            return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.signAsync(algorithm, digest, context);
            }

            // Check the key's allowed operations before signing locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);

            return localKeyCryptographyClient.signAsync(algorithm, digest, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.verifyAsync(algorithm, digest, signature, context);
            }

            // Check the key's allowed operations before verifying locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);

            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local client — defer the operation to the Key Vault service.
                return implClient.wrapKeyAsync(algorithm, key, context);
            }

            // Check the key's allowed operations before wrapping locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.WRAP_KEY);

            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
        }));
    } catch (RuntimeException e) {
        // Surface synchronous failures through the returned Mono, per Reactor conventions.
        return monoError(LOGGER, e);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(available -> {
            if (!available) {
                // No usable local key material; delegate to the Key Vault service.
                return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
            }
            // Confirm the cached key allows unwrapping before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.UNWRAP_KEY);
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(available -> {
            if (!available) {
                // No usable local key material; delegate to the Key Vault service.
                return implClient.signDataAsync(algorithm, data, context);
            }
            // Confirm the cached key allows signing before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
            return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(available -> {
            if (!available) {
                // No usable local key material; delegate to the Key Vault service.
                return implClient.verifyDataAsync(algorithm, data, signature, context);
            }
            // Confirm the cached key allows verification before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
// Fetches the JSON Web Key backing this client from the service and, if it is valid,
// builds a local cryptography client from it. Emits an error if the key collection is
// unknown, the key cannot be retrieved, or the retrieved JWK is invalid.
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
    // A key collection is only present for service-backed clients; without one there is
    // no way to fetch the key material needed for local operations.
    if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
        // Keys living in the secrets collection are fetched through the secrets API;
        // otherwise use the keys API and extract the JWK from the returned KeyVaultKey.
        Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
            ? implClient.getSecretKeyAsync()
            : implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
        return jsonWebKeyMono
            // An empty upstream means the key could not be retrieved at all.
            .switchIfEmpty(Mono.error(new IllegalStateException(
                "Could not retrieve JSON Web Key to perform local cryptographic operations.")))
            .handle((jsonWebKey, sink) -> {
                // Only a structurally valid JWK can back a local client.
                if (jsonWebKey.isValid()) {
                    sink.next(initializeLocalClient(jsonWebKey, implClient));
                } else {
                    sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
                }
            });
    }
    return Mono.error(new IllegalStateException("Could not create a local cryptography client."));
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Pipeline backing service calls; null when the client was built from a raw JsonWebKey.
private final HttpPipeline pipeline;
// Guards against repeatedly re-attempting local-client initialization after a failure.
private volatile boolean attemptedToInitializeLocalClient = false;
// Client that performs cryptography with locally-held key material, when available.
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation; null when operating in local-only mode.
final CryptographyClientImpl implClient;
// Azure Key Vault key identifier this client operates on.
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    this.keyId = keyId;
    this.pipeline = pipeline;
    // Service-backed mode: all operations may be routed to Key Vault via this client.
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Validate the key up front so every later local operation can assume a
    // well-formed key with configured operations and type.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // Local-only mode: no service pipeline or implementation client exists.
    this.implClient = null;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    try {
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
        // Mark initialization as done so it is not re-attempted lazily later.
        this.attemptedToInitializeLocalClient = true;
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Could not initialize local cryptography client.", e));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // May be null when this client was created from a JsonWebKey (local-only mode).
    return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the with-response variant and unwrap the Response body.
    return getKeyWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // A null implClient means this client was built from a JsonWebKey and has no
    // service backing it, so there is nowhere to fetch the key from.
    if (implClient == null) {
        return monoError(LOGGER,
            new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
    }
    try {
        return implClient.getKeyAsync();
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context)
                : implClient.encryptAsync(algorithm, plaintext, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.encryptAsync(encryptParameters, context)
                : implClient.encryptAsync(encryptParameters, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context)
                : implClient.decryptAsync(algorithm, ciphertext, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.decryptAsync(decryptParameters, context)
                : implClient.decryptAsync(decryptParameters, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.signAsync(algorithm, digest, context)
                : implClient.signAsync(algorithm, digest, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context)
                : implClient.verifyAsync(algorithm, digest, signature, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context)
                : implClient.wrapKeyAsync(algorithm, key, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context)
                : implClient.unwrapKeyAsync(algorithm, encryptedKey, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Prefer the local crypto client when one is available; otherwise route the
        // request to the Key Vault service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(available -> available
                ? localKeyCryptographyClient.signDataAsync(algorithm, data, context)
                : implClient.signDataAsync(algorithm, data, context)));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Prefer local cryptography when key material is available; fall back to the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(useLocalClient -> useLocalClient
                ? localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context)
                : implClient.verifyDataAsync(algorithm, data, signature, context)));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(LOGGER, ex);
    }
}
// Fetches the key material (JSON Web Key) from the service and builds a local cryptography
// client from it. Errors when the key collection is unknown or the retrieved key is invalid.
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
    if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
        // Keys living in the "secrets" collection must be fetched through the secrets API;
        // everything else comes from the keys API.
        Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
            ? implClient.getSecretKeyAsync()
            : implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
        return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
            if (!jsonWebKey.isValid()) {
                // Invalid key material cannot back a local client; fail the pipeline.
                sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
            } else {
                sink.next(initializeLocalClient(jsonWebKey, implClient));
            }
        });
    } else {
        return Mono.error(new IllegalStateException(
            "Could not create a local cryptography client. Key collection is null or empty."));
    }
}
} |
Always use the supplier overload for logging at verbose level `LOGGER.verbose(() -> "message")`. | private Mono<Boolean> isLocalClientAvailable() {
if (shouldAttemptToInitializeLocalClient) {
    return retrieveJwkAndInitializeLocalAsyncClient()
        .map(localClient -> {
            localKeyCryptographyClient = localClient;
            jsonWebKey = localKeyCryptographyClient.getJsonWebKey();
            shouldAttemptToInitializeLocalClient = false;
            return true;
        })
        .onErrorResume(e -> {
            // Use the supplier overload so the log message is only built when verbose
            // logging is actually enabled (the eager String overload concatenates
            // unconditionally on every failed attempt).
            if (e instanceof HttpResponseException) {
                int statusCode = ((HttpResponseException) e).getResponse().getStatusCode();
                // 501/505 and non-retryable 4xx responses mean local setup can never
                // succeed, so stop attempting it. Retryable failures (5xx, 408, 429)
                // leave the flag set so the next operation tries again.
                if (statusCode == 501 || statusCode == 505
                    || (statusCode < 500 && statusCode != 408 && statusCode != 429)) {
                    shouldAttemptToInitializeLocalClient = false;
                    LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. "
                        + "Defaulting to service-side cryptography for all operations.", e);
                } else {
                    LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for "
                        + "this operation. Defaulting to service-side cryptography.", e);
                }
            } else {
                // Non-HTTP failures are treated as permanent.
                shouldAttemptToInitializeLocalClient = false;
                LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. "
                    + "Defaulting to service-side cryptography for all operations.", e);
            }
            return Mono.just(false);
        });
}
return Mono.just(localKeyCryptographyClient != null);
} | LOGGER.verbose("Could not set up local cryptography. Defaulting to service-side " | private Mono<Boolean> isLocalClientAvailable() {
if (!attemptedToInitializeLocalClient) {
    return retrieveJwkAndInitializeLocalAsyncClient()
        .map(localClient -> {
            localKeyCryptographyClient = localClient;
            attemptedToInitializeLocalClient = true;
            return true;
        })
        .onErrorResume(t -> {
            if (isThrowableRetryable(t)) {
                // Transient failure: leave the flag unset so the next operation retries
                // local-client initialization.
                LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography for this operation. "
                    + "Defaulting to service-side cryptography.", t);
            } else {
                attemptedToInitializeLocalClient = true;
                // Fix: the original concatenation "Defaulting to" + "service-side" produced
                // "Defaulting toservice-side" — a space was missing at the fragment boundary.
                LOGGER.log(LogLevel.VERBOSE, () -> "Could not set up local cryptography. Defaulting to "
                    + "service-side cryptography for all operations.", t);
            }
            return Mono.just(false);
        });
}
return Mono.just(localKeyCryptographyClient != null);
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
// Pipeline used for service-side calls; null when this client is local-only (JsonWebKey ctor).
private final HttpPipeline pipeline;
// True while lazy local-client initialization should still be attempted on first use.
private volatile boolean shouldAttemptToInitializeLocalClient;
// Key material backing local cryptography; set eagerly by the JsonWebKey ctor, lazily otherwise.
private volatile JsonWebKey jsonWebKey;
// Performs operations locally once key material is available; null until initialized.
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
// Service-backed implementation client; null when operating in local-only mode.
final CryptographyClientImpl implClient;
// Identifier of the Azure Key Vault key this client targets.
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
    // Service-backed clients should lazily try to build a local client from retrieved key
    // material. Without this, the private flag defaults to false and is never set true
    // anywhere else in the class, so isLocalClientAvailable() would never attempt local
    // initialization and every operation would go to the service.
    this.shouldAttemptToInitializeLocalClient = true;
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Local-only mode: the key material must be complete and valid up front because there
    // is no service to fall back to.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // No service backend in this mode; implClient and pipeline stay null.
    this.implClient = null;
    this.jsonWebKey = jsonWebKey;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    try {
        // Build the local client eagerly; failure is fatal since local-only clients cannot
        // defer to the service.
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
        this.shouldAttemptToInitializeLocalClient = false;
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Could not initialize local cryptography client.", e));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
HttpPipeline getHttpPipeline() {
    // Null when the client was created from a JsonWebKey (local-only mode).
    return pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the with-response variant and strip the HTTP envelope.
    Mono<Response<KeyVaultKey>> responseMono = getKeyWithResponse();
    return responseMono.flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // Local-only clients (built from a JsonWebKey) have no service backend to fetch from.
    if (implClient == null) {
        return monoError(LOGGER,
            new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
    }
    try {
        return implClient.getKeyAsync();
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.encryptAsync(algorithm, plaintext, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);
            return localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* -->
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.encryptAsync(encryptParameters, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.ENCRYPT);
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.decryptAsync(algorithm, ciphertext, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);
            return localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* -->
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.decryptAsync(decryptParameters, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.DECRYPT);
            return localKeyCryptographyClient.decryptAsync(decryptParameters, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* -->
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.signAsync(algorithm, digest, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
            return localKeyCryptographyClient.signAsync(algorithm, digest, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.verifyAsync(algorithm, digest, signature, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.wrapKeyAsync(algorithm, key, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.WRAP_KEY);
            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* -->
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.unwrapKeyAsync(algorithm, encryptedKey, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.UNWRAP_KEY);
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(useLocalClient -> {
            if (!useLocalClient) {
                // No local key material; let the Key Vault service perform the operation.
                return implClient.signDataAsync(algorithm, data, context);
            }
            // Check the key's allowed operations before doing the work locally.
            verifyKeyPermissions(jsonWebKey, KeyOperation.SIGN);
            return localKeyCryptographyClient.signDataAsync(algorithm, data, context);
        }));
    } catch (RuntimeException ex) {
        return monoError(LOGGER, ex);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end
* com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* -->
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        return withContext(context -> isLocalClientAvailable().flatMap(localAvailable -> {
            if (!localAvailable) {
                // No local key material; delegate verification to the service.
                return implClient.verifyDataAsync(algorithm, data, signature, context);
            }
            // Local path: check the cached JSON Web Key allows the VERIFY operation first.
            verifyKeyPermissions(jsonWebKey, KeyOperation.VERIFY);
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context);
        }));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
// Fetches the JSON Web Key from the service and, if it is valid, builds a local
// cryptography client from it. Errors the Mono when the key collection is unknown
// or the retrieved key is invalid.
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
    if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
        // A key stored in the secrets collection must be retrieved through the secrets
        // endpoint; otherwise fetch it from the keys collection and unwrap the JWK.
        Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
            ? implClient.getSecretKeyAsync()
            : implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
        return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
            if (jsonWebKey.isValid()) {
                sink.next(initializeLocalClient(jsonWebKey, implClient));
            } else {
                sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
            }
        });
    } else {
        // Without a key collection we cannot tell where to fetch the key from.
        return Mono.error(new IllegalStateException(
            "Could not create a local cryptography client. Key collection is null or empty."));
    }
}
} | class CryptographyAsyncClient {
private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class);
private final HttpPipeline pipeline;
private volatile boolean attemptedToInitializeLocalClient = false;
private volatile LocalKeyCryptographyClient localKeyCryptographyClient;
final CryptographyClientImpl implClient;
final String keyId;
/**
* Creates a {@link CryptographyAsyncClient} that uses a given {@link HttpPipeline pipeline} to service requests.
*
* @param keyId The Azure Key Vault key identifier to use for cryptography operations.
* @param pipeline {@link HttpPipeline} that the HTTP requests and responses flow through.
* @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
*/
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    this.keyId = keyId;
    this.pipeline = pipeline;
    // Service-backed client: operations go through the implementation client until a
    // local client can be initialized from the retrieved key material.
    this.implClient = new CryptographyClientImpl(keyId, pipeline, version);
}
/**
* Creates a {@link CryptographyAsyncClient} that uses a {@link JsonWebKey} to perform local cryptography
* operations.
*
* @param jsonWebKey The {@link JsonWebKey} to use for local cryptography operations.
*/
CryptographyAsyncClient(JsonWebKey jsonWebKey) {
    Objects.requireNonNull(jsonWebKey, "The JSON Web Key is required.");
    // Local-only mode needs a fully-specified key up front: valid key material plus
    // explicit key operations and key type, since there is no service to consult.
    if (!jsonWebKey.isValid()) {
        throw new IllegalArgumentException("The JSON Web Key is not valid.");
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key operations property is not configured.");
    }
    if (jsonWebKey.getKeyType() == null) {
        throw new IllegalArgumentException("The JSON Web Key's key type property is not configured.");
    }
    // No service pipeline in this mode; service-backed operations will be rejected.
    this.implClient = null;
    this.keyId = jsonWebKey.getId();
    this.pipeline = null;
    try {
        this.localKeyCryptographyClient = initializeLocalClient(jsonWebKey, null);
        // Mark initialization as attempted so it is not retried lazily later.
        this.attemptedToInitializeLocalClient = true;
    } catch (RuntimeException e) {
        // Preserve the original cause so the failure reason is not lost.
        throw LOGGER.logExceptionAsError(
            new RuntimeException("Could not initialize local cryptography client.", e));
    }
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
// Package-private accessor used by builder/tests; null when operating in local-only mode.
HttpPipeline getHttpPipeline() {
    return this.pipeline;
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
* <pre>
* cryptographyAsyncClient.getKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKey -->
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultKey> getKey() {
    // Delegate to the Response-returning variant and unwrap the body.
    return getKeyWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission for non-local operations.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
* <pre>
* cryptographyAsyncClient.getKeyWithResponse&
* .contextWrite&
* .subscribe&
* System.out.printf&
* keyResponse.getValue&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.getKeyWithResponse -->
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* requested {@link KeyVaultKey key}.
*
* @throws ResourceNotFoundException When the configured key doesn't exist in the key vault.
* @throws UnsupportedOperationException When operating in local-only mode (using a client created using a
* JsonWebKey instance).
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    // A client created directly from a JsonWebKey has no service pipeline to fetch with.
    if (implClient == null) {
        return monoError(LOGGER,
            new UnsupportedOperationException("Operation not supported when operating in local-only mode."));
    }
    try {
        return implClient.getKeyAsync();
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plaintext The content to be encrypted.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    try {
        // Encrypt locally when the key material is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.encryptAsync(algorithm, plaintext, context)
                : implClient.encryptAsync(algorithm, plaintext, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports
* a single block of data, the size of which is dependent on the target key and the encryption algorithm to be
* used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys, the
* public portion of the key is used for encryption. This operation requires the {@code keys/encrypt} permission
* for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting
* the
* specified {@code plaintext}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
* <pre>
* byte[] plaintextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* EncryptParameters encryptParameters = EncryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.encrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* encryptResult.getCipherText&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
*
* @return A {@link Mono} containing a {@link EncryptResult} whose
* {@link EncryptResult
*
* @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    try {
        // Parameter-object overload: same local-vs-service dispatch as the simple form.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.encryptAsync(encryptParameters, context)
                : implClient.encryptAsync(encryptParameters, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertext = new byte[100];
* new Random&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param ciphertext The content to be decrypted. Microsoft recommends you not use CBC without first ensuring the
* integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* vulnerabilities with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    try {
        // Decrypt locally when the key material is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.decryptAsync(algorithm, ciphertext, context)
                : implClient.decryptAsync(algorithm, ciphertext, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm
* to be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires
* the {@code keys/decrypt} permission for non-local operations.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting
* the specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
* <pre>
* byte[] ciphertextBytes = new byte[100];
* new Random&
* byte[] iv = &
* &
* &
* &
*
* DecryptParameters decryptParameters = DecryptParameters.createA128CbcParameters&
*
* cryptographyAsyncClient.decrypt&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation. Microsoft recommends you not use CBC
* without first ensuring the integrity of the ciphertext using an HMAC, for example.
* See <a href="https:
* with CBC-mode symmetric decryption using padding</a> for more information.
*
* @return A {@link Mono} containing the decrypted blob.
*
* @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    try {
        // Parameter-object overload: same local-vs-service dispatch as the simple form.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.decryptAsync(decryptParameters, context)
                : implClient.decryptAsync(decryptParameters, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Sings the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
* <pre>
* byte[] data = new byte[100];
* new Random&
* MessageDigest md = MessageDigest.getInstance&
* md.update&
* byte[] digest = md.digest&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code digest} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Sign locally when the key material is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.signAsync(algorithm, digest, context)
                : implClient.signAsync(algorithm, digest, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys public portion of the key is used to verify the signature. This operation
* requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
* <pre>
* byte[] myData = new byte[100];
* new Random&
* MessageDigest messageDigest = MessageDigest.getInstance&
* messageDigest.update&
* byte[] myDigest = messageDigest.digest&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature was created.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code digest} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Verify locally when the key material is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context)
                : implClient.verifyAsync(algorithm, digest, signature, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Wraps a symmetric key using the configured key. The wrap operation supports wrapping a symmetric key with both
* symmetric and asymmetric keys. This operation requires the {@code keys/wrapKey} permission for non-local
* operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for wrapping the specified
* key content. Possible values include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Wraps the key content. Subscribes to the call asynchronously and prints out the wrapped key details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
* <pre>
* byte[] key = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* System.out.printf&
* wrapResult.getEncryptedKey&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.wrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param key The key content to be wrapped.
*
* @return A {@link Mono} containing a {@link WrapResult} whose {@link WrapResult
* contains the wrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Wrap locally when the key material is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context)
                : implClient.wrapKeyAsync(algorithm, key, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation
* is the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap.
* This
* operation requires the {@code keys/unwrapKey} permission for non-local operations.
*
* <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
* specified encrypted key content. Possible values for asymmetric keys include:
* {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
* <p>
* Possible values for symmetric keys include: {@link KeyWrapAlgorithm
* {@link KeyWrapAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when
* a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
* <pre>
* byte[] keyToWrap = new byte[100];
* new Random&
*
* cryptographyAsyncClient.wrapKey&
* .contextWrite&
* .subscribe&
* cryptographyAsyncClient.unwrapKey&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey
*
* @param algorithm The encryption algorithm to use for wrapping the key.
* @param encryptedKey The encrypted key content to unwrap.
*
* @return A {@link Mono} containing an {@link UnwrapResult} whose {@link UnwrapResult
* key} contains the unwrapped key result.
*
* @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for wrap operation.
* @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Unwrap locally when the key material is available; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context)
                : implClient.unwrapKeyAsync(algorithm, encryptedKey, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
* and symmetric keys. This operation requires the {@code keys/sign} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
* Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a
* response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
* <pre>
* byte[] data = new byte[100];
* new Random&
*
* cryptographyAsyncClient.sign&
* .contextWrite&
* .subscribe&
* System.out.printf&
* signResult.getSignature&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData
*
* @param algorithm The algorithm to use for signing.
* @param data The content from which signature is to be created.
*
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* contains the created signature.
*
* @throws NullPointerException If {@code algorithm} or {@code data} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for signing.
* @throws UnsupportedOperationException If the sign operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Sign the raw data locally when possible; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.signDataAsync(algorithm, data, context)
                : implClient.signDataAsync(algorithm, data, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
/**
* Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
* keys and asymmetric keys. In case of asymmetric keys public portion of the key is used to verify the signature.
* This operation requires the {@code keys/verify} permission for non-local operations.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
*
* <!-- src_embed com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
* <pre>
* byte[] myData = new byte[100];
* new Random&
*
* &
* cryptographyAsyncClient.verify&
* .contextWrite&
* .subscribe&
* System.out.printf&
* </pre>
* <!-- end com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData
*
* @param algorithm The algorithm to use for signing.
* @param data The raw content against which signature is to be verified.
* @param signature The signature to be verified.
*
* @return A {@link Mono} containing a {@link VerifyResult}
* {@link VerifyResult
*
* @throws NullPointerException If {@code algorithm}, {@code data} or {@code signature} is {@code null}.
* @throws ResourceNotFoundException If the key cannot be found for verifying.
* @throws UnsupportedOperationException If the verify operation is not supported or configured on the key.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Verify the raw data locally when possible; otherwise call the service.
        return withContext(context -> isLocalClientAvailable()
            .flatMap(localAvailable -> localAvailable
                ? localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context)
                : implClient.verifyDataAsync(algorithm, data, signature, context)));
    } catch (RuntimeException e) {
        return monoError(LOGGER, e);
    }
}
private Mono<LocalKeyCryptographyClient> retrieveJwkAndInitializeLocalAsyncClient() {
if (!CoreUtils.isNullOrEmpty(implClient.getKeyCollection())) {
Mono<JsonWebKey> jsonWebKeyMono = CryptographyUtils.SECRETS_COLLECTION.equals(implClient.getKeyCollection())
? implClient.getSecretKeyAsync()
: implClient.getKeyAsync().map(keyVaultKeyResponse -> keyVaultKeyResponse.getValue().getKey());
return jsonWebKeyMono.handle((jsonWebKey, sink) -> {
if (!jsonWebKey.isValid()) {
sink.error(new IllegalStateException("The retrieved JSON Web Key is not valid."));
} else {
sink.next(initializeLocalClient(jsonWebKey, implClient));
}
});
} else {
return Mono.error(new IllegalStateException(
"Could not create a local cryptography client. Key collection is null or empty."));
}
}
} |
This is to copy the original request since we don't have access to it in the retry method. | public HttpResponse send(HttpRequest httpRequest) {
originalRequest = httpRequest;
listener = httpRequest.getListener();
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection, httpRequest.getMetadata().isEagerlyReadResponse());
} | originalRequest = httpRequest; | public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private EventStreamListener listener;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]+$");
protected long lastEventID = -1;
protected long retryAfter;
private HttpRequest originalRequest;
private static final String TEXT_EVENT_STREAM = "text/event-stream";
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw new ConnectException("Invalid proxy address");
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @param eagerlyReadResponse Whether the response body should be eagerly read
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection, boolean eagerlyReadResponse) {
try {
int responseCode = connection.getResponseCode();
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
if (!eagerlyReadResponse) {
if (responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().contains(TEXT_EVENT_STREAM) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
processBuffer(reader);
} catch (IOException e) {
retryExceptionForSSE(e);
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
}
/**
* Processes the sse buffer and dispatches the event
* @param reader The BufferedReader object
* @throws IOException If an I/O error occurs
*/
private void processBuffer(BufferedReader reader) throws IOException {
StringBuilder sb = new StringBuilder(), data = new StringBuilder();
String eventStr = null;
long id = -1;
ServerSentEvent event = new ServerSentEvent();
int dataRead = reader.read();
while (dataRead != -1) {
sb.append((char) dataRead);
dataRead = reader.read();
int index;
while ((index = sb.indexOf("\n\n")) >= 0) {
String[] lines = sb.substring(0, index).split("\n");
sb.delete(0, index + 2);
boolean hasDataOrEvent = false;
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event = event.setComment(line.substring(1).trim());
}
String field = line.substring(0, idx), value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
eventStr = value;
hasDataOrEvent = true;
break;
case "data":
if (data.length() > 0) data.append("\n");
data.append(value);
hasDataOrEvent = true;
break;
case "id":
id = Long.parseLong(value);
lastEventID = id;
event = event.setId(id);
break;
case "retry":
if (DIGITS_ONLY.matcher(value).matches()) {
retryAfter = Long.parseLong(value);
}
event = event.setRetryAfter(retryAfter);
break;
default:
break;
}
}
if (hasDataOrEvent) {
dispatchEvent(data, eventStr, id, event);
}
data.setLength(0);
}
}
listener.onClose(event);
}
/**
* Dispatches the event to the listener
* @param data the "data" field of the server sent event
* @param eventStr the "event" field of the server sent event
* @param id the "id" field of the server sent event
* @param event the ServerSentEventSource object
*/
private void dispatchEvent(StringBuilder data, String eventStr, long id, ServerSentEvent event) {
if (eventStr == null) {
eventStr = DEFAULT_EVENT;
}
event = event.setEvent(eventStr);
if (id != -1) {
event = event.setId(id);
}
event = event.setData(data.toString());
listener.onEvent(event);
}
/**
* Retries the request if the listener allows it
* @param e The IOException that occurred
*/
private void retryExceptionForSSE(IOException e) {
if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(e, retryAfter, lastEventID)) {
HttpRequest copyOriginalRequest = originalRequest;
if (lastEventID != -1) {
copyOriginalRequest.getHeaders().add(HeaderName.fromString("Last-Event-Id"),
String.valueOf(lastEventID));
}
try {
Thread.sleep(retryAfter);
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(copyOriginalRequest);
}
} else {
listener.onError(e);
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https": {
try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
}
case "http": {
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
}
}
throw new ProtocolException("Only HTTP and HTTPS are supported by this client.");
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH")
.append(" ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
This feels like it should be API on Headers, like we already have `Headers(Map<String, String>)`, we should have `Headers(Map<String, List<String>)` // @alzimmermsft ? | private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
} | } | private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException If the socket exchange fails
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException ioe) {
        UncheckedIOException wrapped = new UncheckedIOException(ioe);
        throw LOGGER.logThrowableAsWarning(wrapped);
    }
}
/**
 * Open a connection based on the HttpRequest URL, applying proxy settings, timeouts,
 * the HTTP method, and all request headers.
 *
 * <p>If a proxy with credentials is specified, Basic proxy authorization is applied.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The configured HttpURLConnection object
 * @throws UncheckedIOException If the connection cannot be opened (including an
 * invalid/missing proxy address, whose ConnectException is caught and re-wrapped below)
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // NOTE(review): getBytes() uses the platform default charset; presumably
                    // credentials are ASCII — confirm, or switch to StandardCharsets.UTF_8.
                    String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // Fix: wrap I/O failures as UncheckedIOException (was a bare RuntimeException),
        // preserving the IOException type for callers and matching sendPatchViaSocket.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 * No-op when the request has no body or the method (GET/HEAD) never carries one.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 * @throws UncheckedIOException If writing the body fails
 * @throws IllegalStateException If the request uses an unrecognized HTTP method
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) { // fix: use the local instead of re-querying the request
        case GET:
        case HEAD:
            // These methods never carry a request body.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                // Fix: wrap as UncheckedIOException (was a bare RuntimeException) so the
                // I/O nature of the failure remains visible to callers.
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server. A {@code text/event-stream} response
 * with a registered listener is consumed as server-sent events; error responses have
 * their error stream captured as the response body.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 * @throws UncheckedIOException If reading the response fails
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        if (connection.getErrorStream() == null) {
            if (isTextEventStream(responseHeaders) && listener != null) {
                // Fix: the reader was previously never closed (resource leak), and the
                // charset-less InputStreamReader decoded with the platform default —
                // SSE streams are UTF-8.
                try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
                    RetrySSEResult retrySSEResult = processBuffer(reader, listener);
                    if (retrySSEResult != null) {
                        retryExceptionForSSE(retrySSEResult, listener, httpRequest);
                    }
                }
            }
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        // Fix: UncheckedIOException instead of bare RuntimeException.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Whether the response declares a {@code text/event-stream} content type.
 * Null-safe: a missing Content-Type header means "not an event stream" — the
 * previous code dereferenced the header unconditionally and threw NullPointerException.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
/**
 * Drains the connection's error stream (or, when none exists, its input stream)
 * into an in-memory buffer so it can be exposed as the response body.
 *
 * @param connection the connection whose response bytes are collected
 * @return the collected response bytes
 * @throws IOException if reading the stream fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream collected = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            collected.write(chunk, 0, read);
        }
    }
    return collected;
}
/**
 * Reads the SSE stream character-by-character, dispatching each complete event block
 * (terminated by a blank line) to the listener. On an IOException mid-stream a
 * RetrySSEResult describing where to resume is returned; on clean completion the
 * listener is closed and null is returned.
 *
 * @param reader The BufferedReader over the event stream
 * @param listener The listener object attached with the httpRequest
 * @return retry information if the stream failed, otherwise null
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                collectedData.delete(0, index + 2); // drop the block plus its "\n\n" terminator
                event = processLines(lines);
                // Only surface events that carry data or a non-default event name.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Fix: 'event' is still null when the failure happens before the first complete
        // block, so the old 'event.getId()' threw NullPointerException here. Use the
        // sentinels (-1) that retryExceptionForSSE already understands.
        long lastEventId = (event == null) ? -1 : event.getId();
        long retryAfter = (event == null) ? -1 : event.getRetryAfter();
        return new RetrySSEResult(e, lastEventId, retryAfter);
    }
    return null;
}
// Returns the index of the first blank-line separator ("\n\n") in the buffer —
// i.e. the end of a complete SSE event block — or -1 if no full block has been
// collected yet.
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
/**
 * Parses the lines of one SSE block into a ServerSentEvent. Lines without a ':' and
 * unknown fields are ignored; a line starting with ':' is an SSE comment. Multiple
 * "data" lines are joined with '\n'. If no "event" field was present the event name
 * defaults to DEFAULT_EVENT.
 */
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
// No ':' — not a field line; skip it.
continue;
} else if (idx == 0) {
// ":<text>" is a comment per the SSE format.
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
// NOTE(review): toLowerCase() uses the default locale; under e.g. the Turkish
// locale "ID" would not lower-case to "id" — consider toLowerCase(Locale.ROOT).
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
// Only purely-numeric retry values are accepted.
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
// Unrecognized field — ignored.
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
}
/**
 * Retries the SSE request if the listener allows it, forwarding the last event id via
 * the Last-Event-Id header and honoring any server-provided retry delay; otherwise
 * reports the failure to the listener.
 *
 * @param retrySSEResult the result of the failed stream read
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException ignored) {
            // Fix: restore the interrupt flag so callers up the stack can observe it;
            // it was previously swallowed.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Inner class to hold the result for a retry of an SSE request: the failure,
 * the id of the last dispatched event (-1 if none), and the server-requested
 * retry delay in milliseconds (values <= 0 mean "retry immediately").
 */
private static class RetrySSEResult {
private final long lastEventId;
private final long retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
// Minimal raw-socket HTTP client used only for PATCH requests, which
// HttpURLConnection does not support as a request method.
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
// NOTE(review): URL.getPort() returns -1 when the URL has no explicit port;
// presumably callers always supply one — confirm, or fall back to getDefaultPort().
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream. Follows Location redirects by re-sending the PATCH.
*
* NOTE(review): redirects recurse with no depth limit — a redirect loop would
* recurse until StackOverflowError; consider capping the redirect count.
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
// Host is mandatory for HTTP/1.1; Connection defaults to "close" unless the
// caller explicitly asked for keep-alive.
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
// Relative redirect: resolve against the current request URL.
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* NOTE(review): when the body is null the request is written without the blank
* line that terminates the header section, and no Content-Length header is ever
* added for bodies — verify the servers involved tolerate both.
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* NOTE(review): the status code is located relative to the first '.' in the
* status line (assumes a dotted version like "HTTP/1.1 200 OK"), and getBytes()
* below uses the platform default charset — confirm both assumptions.
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
/**
 * Consumes the response body as a text/event-stream, dispatching events to the
 * listener and retrying the request if the listener asks to after an IOException.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The connection whose input stream carries the event stream
 * @param listener The listener attached to the request
 * @throws UncheckedIOException If reading the stream fails
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Fix: use the StandardCharsets constant instead of the "UTF-8" name lookup,
    // avoiding the checked UnsupportedEncodingException path entirely.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one SSE block into a ServerSentEvent. A line starting with ':'
 * is a comment; multiple "data" lines are accumulated in order; the event name
 * defaults to DEFAULT_EVENT when no "event" field is present.
 *
 * @param lines the lines of a single event block (without the blank terminator)
 * @return the parsed event
 * @throws IllegalArgumentException if a non-empty line carries an unknown field name
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        if (line.isEmpty()) {
            continue; // blank lines carry no field and previously crashed/threw below
        }
        int idx = line.indexOf(':');
        if (idx == 0) {
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // Fix: the old code used 'lines.length' (the number of lines in the block) as
        // the substring bound for colon-less lines, which truncated the field name or
        // threw StringIndexOutOfBoundsException. A field line without ':' is the
        // entire line with an empty value.
        String field = (idx < 0 ? line : line.substring(0, idx)).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it; otherwise (or when already
 * interrupted) reports the failure to the listener. The last event id is forwarded
 * via the Last-Event-Id header and any server-requested delay is honored.
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // Fix: restore the interrupt flag (it was previously swallowed) so callers
        // up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
We should move these into the stream event block, so that we aren't paying for them for non-sse events | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | RetrySSEResult retrySSEResult; | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    // HttpURLConnection rejects PATCH, so the request is written over a raw socket.
    HttpResponse response;
    try {
        response = SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException ioe) {
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(ioe));
    }
    return response;
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                // Default to Basic proxy authorization. Encode with an explicit charset:
                // getBytes() without one uses the platform default and can corrupt
                // non-ASCII credentials.
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured": leave the JDK defaults untouched.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        // Copy every request header; multi-valued headers are added value by value.
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // UncheckedIOException (not bare RuntimeException) preserves the I/O failure
        // type and matches the wrapping used elsewhere in this client.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            // These methods never carry a request body; silently ignore it.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                // UncheckedIOException keeps the I/O failure type instead of a bare
                // RuntimeException, consistent with the rest of this client.
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
/**
 * Converts the connection's response headers into a {@code Headers} instance.
 *
 * @param connection The connection whose response headers are read.
 * @return The response headers, excluding the status-line pseudo-header.
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    // Fetch the header map once. The previous code called getHeaderFields() a second
    // time as the loop's iterable, ignoring the map it had already retrieved.
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        // HttpURLConnection exposes the status line under a null key; skip it.
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Checks whether the response is a server-sent event stream.
 *
 * @param responseHeaders The response headers.
 * @return true if Content-Type is present and equals text/event-stream.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Guard against a missing Content-Type header, which previously threw
    // NullPointerException for any response that omitted it.
    return responseHeaders.get(HeaderName.CONTENT_TYPE) != null
        && responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
// Drains the connection's response into an in-memory buffer. The error stream, when
// present, takes priority over the regular input stream.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream collected = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream != null) ? errorStream : connection.getInputStream()) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            collected.write(chunk, 0, read);
        }
    }
    return collected;
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
/**
 * Reads the SSE stream character by character, dispatching each complete event block
 * (terminated by a blank line) to the listener.
 *
 * @param reader The reader over the event stream.
 * @param listener The listener to dispatch events to.
 * @return A RetrySSEResult describing the failure if the stream broke, or null on clean close.
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            // "\n\n" terminates one SSE event block.
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Only dispatch events that carry data or a non-default event name.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // The stream can fail before any event was parsed: guard against a null event,
        // which previously caused a NullPointerException right here.
        return (event == null)
            ? new RetrySSEResult(e, -1, -1)
            : new RetrySSEResult(e, event.getId(), event.getRetryAfter());
    }
    return null;
}
// Returns the index of the first blank line ("\n\n") that terminates an SSE event
// block, or -1 if the buffer does not yet contain a complete block.
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
// Parses the lines of one SSE event block into a ServerSentEvent. Lines follow the
// "field: value" wire format; a line starting with ':' is a comment, and the
// recognized fields are event, data, id and retry.
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
// No colon: not a recognized field line; ignore it.
continue;
} else if (idx == 0) {
// A leading colon marks a comment line.
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
// Multiple data lines within one block are joined with newlines.
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
// Only accept purely numeric retry values.
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
// Unknown fields are ignored.
continue;
}
}
// Events with no explicit name default to "message".
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted()
        && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            // Tell the server which event we saw last so it can resume the stream.
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it;
            // the previous code swallowed the interruption silently.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
// ID of the last event parsed before the failure, or -1 when none was seen.
private final long lastEventId;
// Server-requested retry delay; values <= 0 mean "retry without sleeping"
// (the caller passes this to Thread.sleep, so the unit is milliseconds).
private final long retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
// NOTE(review): URL.getPort() returns -1 when the URL has no explicit port — confirm
// callers always supply one, otherwise the socket connect below will fail.
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
// TLS socket; closed automatically by try-with-resources.
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
// The raw request needs an explicit Host header; close the connection afterwards
// unless the caller asked for keep-alive.
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
// Follow a Location header by re-sending the request at the new URL.
// NOTE(review): there is no redirect-depth limit here, so a redirect cycle
// would recurse until stack exhaustion — confirm this is acceptable.
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
// Relative redirect: resolve against the current URL.
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
// Serializes the request as raw HTTP/1.1 text — request line, "name: value" headers,
// then an optional body — and flushes it to the socket's writer.
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
    StringBuilder raw = new StringBuilder();
    raw.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
    for (Header header : httpRequest.getHeaders()) {
        for (String value : header.getValuesList()) {
            raw.append(header.getName()).append(": ").append(value).append("\r\n");
        }
    }
    BinaryData body = httpRequest.getBody();
    if (body != null) {
        raw.append("\r\n").append(body.toString()).append("\r\n");
    }
    out.write(raw.toString());
    out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
    throws IOException {
    // Status line looks like "HTTP/1.1 200 OK": the code starts 3 chars after the '.'.
    String statusLine = reader.readLine();
    if (statusLine == null) {
        throw new IOException("Connection closed before a status line was received");
    }
    int dotIndex = statusLine.indexOf('.');
    int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
    Headers headers = new Headers();
    String line;
    while ((line = reader.readLine()) != null && !line.isEmpty()) {
        // Split on the first ':' instead of ": " — headers without a space after the
        // colon previously caused an ArrayIndexOutOfBoundsException.
        int split = line.indexOf(':');
        if (split < 0) {
            continue;
        }
        String key = line.substring(0, split);
        String value = line.substring(split + 1).trim();
        headers.add(HeaderName.fromString(key), value);
    }
    StringBuilder bodyString = new StringBuilder();
    while ((line = reader.readLine()) != null) {
        bodyString.append(line);
    }
    // Use an explicit charset; getBytes() without one depends on the platform default.
    BinaryData body = BinaryData.fromByteBuffer(
        ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
    return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
// HttpURLConnection does not support the PATCH verb, so PATCH requests are
// tunneled through a raw socket client instead.
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
// Open the connection, write the request body (if any), then read the response.
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
// Surface socket failures as unchecked so send() keeps a clean signature.
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                // Default to Basic proxy authorization. Encode with an explicit charset:
                // getBytes() without one uses the platform default and can corrupt
                // non-ASCII credentials.
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured": leave the JDK defaults untouched.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        // Copy every request header; multi-valued headers are added value by value.
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
// These methods never carry a request body; silently ignore it.
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
// try-with-resources closes the output stream even on write failure.
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
/**
 * Streams the response body as server-sent events, dispatching each event to the
 * listener and retrying the whole request if the listener asks for it.
 *
 * @param httpRequest The originating request (re-sent on retry).
 * @param connection The open connection whose input stream carries the events.
 * @param listener The listener receiving parsed events.
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // StandardCharsets.UTF_8 instead of the "UTF-8" string: no charset lookup that
    // can throw UnsupportedEncodingException, and it is the idiomatic form.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
// Checks whether the response declares itself as a server-sent event stream.
// A missing Content-Type header means it cannot be one.
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    if (contentType == null) {
        return false;
    }
    return contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
// A blank line (seen as "\n\n" in the buffer) terminates one event block.
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
// Only dispatch events that carry data or a non-default event name.
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
// The stream may fail before any event was parsed, hence the null guards.
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
// True once the buffer contains the blank line ("\n\n") that ends an SSE event block.
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one SSE event block ("field: value" lines; a leading ':' marks
 * a comment) into a {@code ServerSentEvent}.
 *
 * @param lines The lines of a single event block.
 * @return The parsed event; its name defaults to "message" when unset.
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // Bug fix: the colon-less case previously bounded the substring with
        // lines.length (the number of lines in the block) instead of the line's own
        // length, corrupting the field name or throwing StringIndexOutOfBoundsException.
        String field = (idx < 0 ? line : line.substring(0, idx)).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                // Only accept purely numeric retry values, interpreted as milliseconds.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                // The SSE specification says unknown fields must be ignored; throwing
                // here (as before) aborted otherwise valid streams.
                break;
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    // No retry if we were interrupted or the listener declines; report the error instead.
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tell the server which event we saw last so it can resume the stream.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe it; the previous code
        // swallowed the interruption silently.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
// Converts the connection's response headers into a Headers instance, skipping the
// status-line pseudo-header that HttpURLConnection exposes under a null key.
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
// Drains the connection's response into an in-memory buffer; the error stream, when
// present, takes priority over the regular input stream.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
// ID of the last event parsed before the failure, or -1 when none was seen.
private final long lastEventId;
// Server-requested retry delay; null when the stream never specified one.
private final Duration retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
// NOTE(review): URL.getPort() returns -1 when the URL has no explicit port — confirm
// callers always supply one, otherwise the socket connect below will fail.
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
// TLS socket; closed automatically by try-with-resources.
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
// The raw request needs an explicit Host header; close the connection afterwards
// unless the caller asked for keep-alive.
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
// Follow a Location header by re-sending the request at the new URL.
// NOTE(review): there is no redirect-depth limit, so a redirect cycle would
// recurse until stack exhaustion — confirm this is acceptable.
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
// Relative redirect: resolve against the current URL.
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
// Serializes the request as raw HTTP/1.1 text: request line, headers, optional body.
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
// Note: headers are written as "name:value" with no space after the colon.
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
    throws IOException {
    // Status line looks like "HTTP/1.1 200 OK": the code starts 3 chars after the '.'.
    String statusLine = reader.readLine();
    if (statusLine == null) {
        throw new IOException("Connection closed before a status line was received");
    }
    int dotIndex = statusLine.indexOf('.');
    int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
    Headers headers = new Headers();
    String line;
    while ((line = reader.readLine()) != null && !line.isEmpty()) {
        int split = line.indexOf(':');
        // Guard against malformed header lines with no colon, which previously threw
        // StringIndexOutOfBoundsException from substring(0, -1).
        if (split < 0) {
            continue;
        }
        String key = line.substring(0, split);
        String value = line.substring(split + 1).trim();
        headers.add(HeaderName.fromString(key), value);
    }
    StringBuilder bodyString = new StringBuilder();
    while ((line = reader.readLine()) != null) {
        bodyString.append(line);
    }
    // Use an explicit charset; getBytes() without one depends on the platform default.
    BinaryData body = BinaryData.fromByteBuffer(
        ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
    return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
What happens if this is a text event stream but the listener is `null`? Will we read the whole response into memory? | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | if (isTextEventStream(responseHeaders) && listener != null) { | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
// HttpURLConnection does not support the PATCH verb, so PATCH requests are
// tunneled through a raw socket client instead.
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
// Open the connection, write the request body (if any), then read the response.
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
// Surface socket failures as unchecked so send() keeps a clean signature.
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                // Default to Basic proxy authorization. Encode with an explicit charset:
                // getBytes() without one uses the platform default and can corrupt
                // non-ASCII credentials.
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured": leave the JDK defaults untouched.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        // Copy every request header; multi-valued headers are added value by value.
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // UncheckedIOException (not bare RuntimeException) preserves the I/O failure
        // type and matches the wrapping used elsewhere in this client.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>GET and HEAD requests never carry a body, so any configured body is ignored for them.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (httpRequest.getHttpMethod()) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                // Preserve the I/O nature of the failure instead of a bare RuntimeException.
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Maps the connection's response headers into a {@link Headers} instance.
 *
 * <p>The HTTP status line is reported by {@link HttpURLConnection#getHeaderFields()} under a
 * {@code null} key and is skipped.</p>
 *
 * @param connection The HttpURLConnection the response was received on
 * @return The response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    // Presize for HashMap's default 0.75 load factor to avoid rehashing while copying.
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Reuse the already-fetched map instead of calling getHeaderFields() a second time.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Returns true when the response declares a {@code text/event-stream} content type.
 * Guards against a missing Content-Type header (previously an NPE).
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
/**
 * Drains the connection's body into an in-memory buffer. The error stream is preferred when it
 * exists; otherwise the regular response stream is read.
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream result = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            result.write(chunk, 0, read);
        }
    }
    return result;
}
/**
 * Reads the event stream character-by-character, dispatching an event to the listener each time
 * a complete block (terminated by a blank line, i.e. "\n\n") has been accumulated.
 *
 * @param reader The BufferedReader over the event stream
 * @param listener The listener object attached with the httpRequest
 * @return a RetrySSEResult describing a retryable I/O failure, or null when the stream ended normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                // Drop the consumed block plus the two terminator newlines.
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Skip bare default events with no data (e.g. comment-only blocks).
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // 'event' is null when the stream failed before any complete block was parsed;
        // fall back to "no id"/"no retry hint" instead of throwing an NPE here.
        long lastEventId = (event == null) ? -1 : event.getId();
        long retryAfter = (event == null) ? -1 : event.getRetryAfter();
        return new RetrySSEResult(e, lastEventId, retryAfter);
    }
    return null;
}
/**
 * Returns the index of the blank line ("\n\n") that terminates an SSE block, or -1 when the
 * buffered data does not yet contain a complete block.
 */
private int isEndOfBlock(StringBuilder buffered) {
    return buffered.indexOf("\n\n");
}
/**
 * Parses the lines of one SSE block into a {@link ServerSentEvent}. Comment lines (leading ':')
 * set the comment; "event", "data", "id" and "retry" fields are honored; anything else is
 * ignored. The event name defaults to "message" when absent.
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder dataBuffer = new StringBuilder();
    ServerSentEvent sse = new ServerSentEvent();
    for (String line : lines) {
        int colon = line.indexOf(':');
        if (colon < 0) {
            // No colon: not a recognizable field, skip it.
            continue;
        }
        if (colon == 0) {
            // A line starting with ':' is a comment.
            sse.setComment(line.substring(1).trim());
            continue;
        }
        String name = line.substring(0, colon).trim().toLowerCase();
        String value = line.substring(colon + 1).trim();
        if ("event".equals(name)) {
            sse.setEvent(value);
        } else if ("data".equals(name)) {
            // Multiple data lines are joined with '\n'.
            if (dataBuffer.length() > 0) {
                dataBuffer.append("\n");
            }
            dataBuffer.append(value);
        } else if ("id".equals(name)) {
            if (!value.isEmpty()) {
                sse.setId(Long.parseLong(value));
            }
        } else if ("retry".equals(name)) {
            if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                sse.setRetryAfter(Long.parseLong(value));
            }
        }
        // Unknown field names fall through and are ignored.
    }
    if (sse.getEvent() == null) {
        sse.setEvent(DEFAULT_EVENT);
    }
    if (dataBuffer.length() != 0) {
        sse = sse.setData(dataBuffer.toString());
    }
    return sse;
}
/**
 * Retries the request if the listener allows it.
 *
 * <p>When a last event id is known, a {@code Last-Event-Id} header is added so the server can
 * resume the stream from that point.</p>
 *
 * @param retrySSEResult the result of the failed read
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException ignored) {
            // Restore the interrupt status so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Immutable holder describing a failed SSE read: the exception that occurred, the id of the
 * last delivered event (-1 when none), and the server-provided retry delay in milliseconds.
 */
private static class RetrySSEResult {
    private final long lastEventId;
    private final long retryAfter;
    private final IOException ioException;

    public RetrySSEResult(IOException cause, long lastSeenEventId, long serverRetryHint) {
        this.ioException = cause;
        this.lastEventId = lastSeenEventId;
        this.retryAfter = serverRetryHint;
    }

    public IOException getException() {
        return ioException;
    }

    public long getLastEventId() {
        return lastEventId;
    }

    public long getRetryAfter() {
        return retryAfter;
    }
}
/**
 * Minimal raw-socket HTTP client used to send PATCH requests, which HttpURLConnection rejects.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the
     * connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output
     * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
     * from the input stream. Redirects indicated by a Location header are followed.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (
            BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect; resolve against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending
     * over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ")
            .append(httpRequest.getUrl().getPath())
            .append(HTTP_VERSION)
            .append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList().forEach(value -> request.append(header.getName())
                    .append(": ")
                    .append(value)
                    .append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n")
                .append(httpRequest.getBody().toString())
                .append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information
     * needed to construct an instance of HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // The server closed the connection before sending a status line.
            throw new ProtocolException("The server did not send an HTTP status line.");
        }
        // Parse the status code out of e.g. "HTTP/1.1 200 OK" by anchoring on the version's '.'.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            if (split < 0) {
                // Previously split(": ", 2) followed by kv[1] threw ArrayIndexOutOfBoundsException
                // on header lines without ": "; skip malformed lines instead.
                continue;
            }
            String k = line.substring(0, split);
            String v = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(k), v);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Use an explicit charset; the no-arg getBytes() depends on the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a DefaultHttpClient.
 *
 * @param connectionTimeout connect timeout, or null for none
 * @param readTimeout read timeout, or null for none
 * @param proxyOptions optional proxy configuration, may be null
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    // -1 marks "not configured"; connect() only applies a timeout when the value is != -1.
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // HttpURLConnection does not accept PATCH as a request method, so PATCH requests are
    // routed over a raw socket instead.
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    HttpURLConnection connection = connect(httpRequest);
    sendBody(httpRequest, connection);
    return receiveResponse(httpRequest, connection);
}
/**
 * Synchronously sends a PATCH request via a socket client, since HttpURLConnection rejects
 * PATCH as a request method.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException If an I/O error occurs while talking to the server
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 * @throws UncheckedIOException If opening the connection fails
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode with an explicit charset; the no-arg getBytes() uses the platform default.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 is the "not configured" sentinel set by the constructor.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Writes the request body, if any, to the connection's output stream.
 *
 * <p>GET and HEAD requests never carry a body, so any configured body is ignored for them.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            return;

        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream requestStream = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(requestStream);
                requestStream.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;

        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Consumes a text/event-stream response body, dispatching parsed events to the attached
 * listener and retrying the request when the listener asks for it. (The previous javadoc
 * described receiveResponse, not this method.)
 *
 * @param httpRequest The HTTP Request that was sent
 * @param connection The HttpURLConnection the response is being read from
 * @param listener The listener attached with the httpRequest
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Use the charset constant rather than the "UTF-8" name; this removes the (impossible)
    // UnsupportedEncodingException path from the constructor.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Returns true when the response declares a {@code text/event-stream} content type; false when
 * the Content-Type header is absent or different.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
/**
 * Reads the event stream line-by-line, dispatching a ServerSentEvent to the listener each time
 * a complete block (terminated by a blank line) has been collected.
 *
 * @param reader The BufferedReader over the event stream
 * @param listener The listener object attached with the httpRequest
 * @return a RetrySSEResult describing a retryable I/O failure, or null when the stream ended normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            // readLine() strips the terminator, so re-append it; the blank line that ends a
            // block then shows up as "\n\n" in the buffer.
            collectedData.append(line).append("\n");
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                // Skip bare default events with no data (e.g. comment-only blocks).
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Report the last successfully parsed event's id/retry hint (or -1/null when none)
        // so the caller can resume the stream from the right position.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
    }
    return null;
}
/**
 * True when the buffered data contains the blank line ("\n\n") that terminates an SSE block.
 */
private boolean isEndOfBlock(StringBuilder buffered) {
    return buffered.indexOf("\n\n") != -1;
}
/**
 * Parses the lines of one SSE block into a {@link ServerSentEvent}. Comment lines (leading ':')
 * set the comment; "event", "data", "id" and "retry" fields are honored. The event name
 * defaults to "message" when absent.
 *
 * @param lines the lines of one event block, already split on '\n'
 * @return the parsed event
 * @throws IllegalArgumentException if a line uses an unrecognized field name
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A line starting with ':' is a comment.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // Bug fix: when there is no ':' the field name is the whole LINE. The previous code
        // used 'lines.length' (the number of lines in the block) as the substring bound, which
        // truncated the field name or threw StringIndexOutOfBoundsException.
        String field = (idx < 0 ? line : line.substring(0, idx)).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it.
 *
 * <p>When a last event id is known, a {@code Last-Event-Id} header is added so the server can
 * resume the stream from that point.</p>
 *
 * @param retrySSEResult the result of the failed read
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // Restore the interrupt status so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Maps the connection's response headers into a {@link Headers} instance. The HTTP status
 * line, which getHeaderFields() reports under a null key, is skipped.
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers result = new Headers(rawHeaders.size());
    for (Map.Entry<String, List<String>> header : rawHeaders.entrySet()) {
        if (header.getKey() == null) {
            continue;
        }
        result.add(HeaderName.fromString(header.getKey()), header.getValue());
    }
    return result;
}
/**
 * Drains the connection's body into an in-memory buffer, preferring the error stream when one
 * exists and falling back to the regular response stream otherwise.
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream collected = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream responseStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] transfer = new byte[8192];
        int count;
        while ((count = responseStream.read(transfer)) != -1) {
            collected.write(transfer, 0, count);
        }
    }
    return collected;
}
/**
 * Immutable holder describing a failed SSE read: the exception that occurred, the id of the
 * last delivered event (-1 when none), and the server-provided retry delay (null when none).
 */
private static class RetrySSEResult {
    private final long lastEventId;
    private final Duration retryAfter;
    private final IOException ioException;

    public RetrySSEResult(IOException cause, long lastSeenEventId, Duration serverRetryHint) {
        this.ioException = cause;
        this.lastEventId = lastSeenEventId;
        this.retryAfter = serverRetryHint;
    }

    public IOException getException() {
        return ioException;
    }

    public long getLastEventId() {
        return lastEventId;
    }

    public Duration getRetryAfter() {
        return retryAfter;
    }
}
/**
 * Minimal raw-socket HTTP client used to send PATCH requests, which HttpURLConnection rejects.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream. Redirects indicated
     * by a Location header are followed.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect; resolve against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // The server closed the connection before sending a status line.
            throw new ProtocolException("The server did not send an HTTP status line.");
        }
        // Parse the status code out of e.g. "HTTP/1.1 200 OK" by anchoring on the version's '.'.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            if (split < 0) {
                // Previously substring(0, -1) threw StringIndexOutOfBoundsException on header
                // lines without ':'; skip malformed lines instead.
                continue;
            }
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Use an explicit charset; the no-arg getBytes() depends on the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
We should close the reader. | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream())); | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a DefaultHttpClient.
 *
 * @param connectionTimeout connect timeout, or null for none
 * @param readTimeout read timeout, or null for none
 * @param proxyOptions optional proxy configuration, may be null
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    // -1 marks "not configured"; connect() only applies a timeout when the value is != -1.
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // HttpURLConnection does not accept PATCH as a request method, so PATCH requests are
    // routed over a raw socket instead.
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    HttpURLConnection connection = connect(httpRequest);
    sendBody(httpRequest, connection);
    return receiveResponse(httpRequest, connection);
}
/**
 * Synchronously sends a PATCH request via a socket client, since HttpURLConnection rejects
 * PATCH as a request method.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException If an I/O error occurs while talking to the server
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 * @throws UncheckedIOException If opening the connection fails
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode with an explicit charset; the no-arg getBytes() uses the platform default.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 is the "not configured" sentinel set by the constructor.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // Wrap as UncheckedIOException (not a bare RuntimeException) so the I/O nature of the
        // failure is preserved; consistent with sendPatchViaSocket.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>GET and HEAD requests never carry a body, so any configured body is ignored for them.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (httpRequest.getHttpMethod()) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                // Preserve the I/O nature of the failure instead of a bare RuntimeException.
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Maps the connection's response headers into a {@link Headers} instance.
 *
 * <p>The HTTP status line is reported by {@link HttpURLConnection#getHeaderFields()} under a
 * {@code null} key and is skipped.</p>
 *
 * @param connection The HttpURLConnection the response was received on
 * @return The response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    // Presize for HashMap's default 0.75 load factor to avoid rehashing while copying.
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Reuse the already-fetched map instead of calling getHeaderFields() a second time.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Returns true when the response declares a {@code text/event-stream} content type.
 * Guards against a missing Content-Type header (previously an NPE).
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
/**
 * Drains the connection's response body into an in-memory buffer.
 *
 * <p>The error stream is preferred when present so failed responses still surface their body;
 * otherwise the regular input stream is consumed.</p>
 *
 * @param connection the connection whose response body should be read
 * @return a buffer holding the complete response body
 * @throws IOException if reading the response fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream != null) ? errorStream : connection.getInputStream()) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Reads the SSE stream character-by-character, dispatching each complete event block to the
 * listener. An event block is terminated by a blank line ({@code "\n\n"}).
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information when the stream failed mid-read, or null when it completed normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                // Drop the parsed block plus its two-character "\n\n" terminator.
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Only dispatch events that carry a custom event name or actual data.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // 'event' is null when the stream fails before any block was parsed; the previous code
        // dereferenced it unconditionally here and threw an NPE instead of returning retry info.
        return (event == null)
            ? new RetrySSEResult(e, -1, 0)
            : new RetrySSEResult(e, event.getId(), event.getRetryAfter());
    }
    return null;
}
/**
 * Locates the blank-line terminator that ends an SSE event block.
 *
 * @param sb the data collected from the stream so far
 * @return the index of the first {@code "\n\n"} separator, or -1 when no complete block is buffered
 */
private int isEndOfBlock(StringBuilder sb) {
    final String blockTerminator = "\n\n";
    return sb.indexOf(blockTerminator);
}
/**
 * Parses the accumulated lines of a single SSE block into a ServerSentEvent.
 *
 * <p>Recognized fields are {@code event}, {@code data} (multiple data lines are joined with
 * newlines), {@code id} and {@code retry}; a line starting with {@code ':'} is a comment.
 * Unknown fields and lines without a colon are silently skipped.
 * NOTE(review): the SSE spec treats a colon-less line as a field name with an empty value
 * rather than skipping it — TODO confirm whether that difference matters here.</p>
 *
 * @param lines the individual lines of one event block
 * @return the populated event; its event name defaults to {@code "message"}
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder eventData = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx < 0) {
            // No colon: line is ignored entirely (see review note above).
            continue;
        } else if (idx == 0) {
            // Leading colon marks a comment line.
            event.setComment(line.substring(1).trim());
            continue;
        }
        String field = line.substring(0, idx);
        String value = line.substring(idx + 1).trim();
        // NOTE(review): toLowerCase() uses the default locale — Locale.ROOT would be safer.
        switch (field.trim().toLowerCase()) {
            case "event":
                event.setEvent(value);
                continue;
            case "data":
                // Multiple data lines in one block are joined with '\n'.
                if (eventData.length() > 0) {
                    eventData.append("\n");
                }
                eventData.append(value);
                continue;
            case "id":
                if (!value.isEmpty()) {
                    event.setId(Long.parseLong(value));
                }
                continue;
            case "retry":
                // Only purely numeric retry values are honored.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setRetryAfter(Long.parseLong(value));
                }
                continue;
            default:
                // Unknown field names are ignored.
                continue;
        }
    }
    if (event.getEvent() == null) {
        event.setEvent(DEFAULT_EVENT);
    }
    if (eventData.length() != 0) {
        // setData appears to return the event (builder-style); keep the reassignment.
        event = event.setData(eventData.toString());
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * <p>When the listener approves a retry, the last received event id is replayed via the
 * {@code Last-Event-Id} header and the server-provided delay is honored before resending.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers can observe the interruption; the
            // previous code swallowed it, losing the interrupt for the rest of the thread.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Inner class to hold the result for a retry of an SSE request
 *
 * <p>Immutable snapshot of the failure together with the reconnect hints gathered from the
 * stream: the last event id seen (-1 when none) and the retry delay in milliseconds.</p>
 */
private static class RetrySSEResult {
    // Failure that interrupted the stream.
    private final IOException ioException;
    // Id of the last event parsed before the failure; -1 when no event was seen.
    private final long lastEventId;
    // Server-suggested reconnect delay in milliseconds.
    private final long retryAfter;

    public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }

    public long getLastEventId() {
        return lastEventId;
    }

    public long getRetryAfter() {
        return retryAfter;
    }

    public IOException getException() {
        return ioException;
    }
}
/**
 * Minimal raw-socket HTTP client used only for PATCH requests, which
 * {@code HttpURLConnection} does not support.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
    /**
     * Opens a socket connection, then writes the PATCH request across the
     * connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                // TLS-wrapped socket for https URLs.
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }
    /**
     * Calls buildAndSend to send a String representation of the request across the output
     * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
     * from the input stream
     *
     * <p>NOTE(review): redirects (any response carrying a Location header) recurse back into
     * {@link #sendPatchRequest} with no hop limit — a redirect loop would recurse until
     * stack overflow. TODO confirm a cap is acceptable.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
        // Host is mandatory for HTTP/1.1; the connection is closed per request unless the
        // caller explicitly asked for keep-alive.
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (
            BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    // Absolute redirect target.
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve against the current URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }
    /**
     * Converts an instance of HttpRequest to a String representation for sending
     * over the output stream
     *
     * <p>NOTE(review): when the request has no body, no blank line is written after the
     * headers, so the request is never terminated per the HTTP message grammar — TODO
     * confirm servers tolerate this. No Content-Length header is computed here either;
     * presumably the caller sets it. Verify against callers.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ")
            .append(httpRequest.getUrl().getPath())
            .append(HTTP_VERSION)
            .append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList().forEach(value -> request.append(header.getName())
                    .append(": ")
                    .append(value)
                    .append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n")
                .append(httpRequest.getBody().toString())
                .append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }
    /**
     * Reads the response from the input stream and extracts the information
     * needed to construct an instance of HttpUrlConnectionResponse
     *
     * <p>NOTE(review): the status code is located relative to the '.' in "HTTP/1.1", which
     * breaks for other protocol versions; headers are split on ": " exactly; body lines are
     * concatenated without newlines and encoded via the platform-default charset
     * (no-arg {@code getBytes()}) — TODO confirm these simplifications are acceptable.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        // "HTTP/1.1 200 OK" -> '.' is at index 6, so the code occupies indexes 9..11.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        // Headers run until the first empty line.
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            String[] kv = line.split(": ", 2);
            String k = kv[0];
            String v = kv[1];
            headers.add(HeaderName.fromString(k), v);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
 * Consumes a {@code text/event-stream} response body, dispatching parsed events to the
 * listener and retrying the request when the listener permits it.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being read from
 * @param listener the server-sent event listener attached to the request
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // StandardCharsets.UTF_8 avoids the checked UnsupportedEncodingException path of the
    // String-based charset lookup and matches usage elsewhere in this file.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the accumulated lines of one SSE block into a ServerSentEvent.
 *
 * <p>Recognized fields are {@code event}, {@code data}, {@code id} and {@code retry}; a line
 * starting with {@code ':'} is a comment. A line without a colon is treated as a field name
 * with an empty value, and unrecognized fields are ignored, as the SSE specification
 * requires.</p>
 *
 * @param lines the individual lines of a single event block
 * @return the populated event; its event name defaults to {@code "message"}
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A leading colon marks a comment line.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // When the line has no colon the whole line is the field name. The previous code used
        // lines.length (the number of lines in the block) as the substring bound, which is the
        // wrong string index and could throw StringIndexOutOfBoundsException.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                // The SSE spec requires unknown field names to be ignored; throwing here also
                // aborted the stream on harmless empty lines.
                break;
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * <p>When the listener approves a retry, the last received event id is replayed via the
 * {@code Last-Event-Id} header and the server-provided delay is honored before resending.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    // Guard clause: interrupted or listener declined -> surface the error and stop.
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can observe the interruption; the previous
        // code swallowed it.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
This doesn't contain the response body? | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders); | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
 * Extracts the response headers from an HttpURLConnection.
 *
 * <p>The status-line pseudo header, reported by HttpURLConnection under a {@code null} key,
 * is skipped because it is not a real header.</p>
 *
 * @param connection The HttpURLConnection the request was sent over
 * @return a Headers object containing the response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    // Pre-size past the default load factor so the map doesn't resize while populating.
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Iterate the cached map instead of calling connection.getHeaderFields() a second time.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Checks whether the response declares a {@code Content-Type} of {@code text/event-stream}.
 *
 * @param responseHeaders the response headers to inspect
 * @return true if the response is a server-sent event stream
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    // Responses without a Content-Type header are not event streams; previously this
    // dereferenced the header unconditionally and threw an NPE when it was absent.
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
 * Reads the SSE stream character-by-character, dispatching each complete event block to the
 * listener. An event block is terminated by a blank line ({@code "\n\n"}).
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information when the stream failed mid-read, or null when it completed normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                // Drop the parsed block plus its two-character "\n\n" terminator.
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Only dispatch events that carry a custom event name or actual data.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // 'event' is null when the stream fails before any block was parsed; the previous code
        // dereferenced it unconditionally here and threw an NPE instead of returning retry info.
        return (event == null)
            ? new RetrySSEResult(e, -1, 0)
            : new RetrySSEResult(e, event.getId(), event.getRetryAfter());
    }
    return null;
}
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
/**
 * Parses the lines of one SSE event block into a ServerSentEvent.
 * Recognized fields are "event", "data", "id" and "retry"; lines starting with
 * ':' are comments; anything else is ignored per the SSE specification.
 *
 * @param lines the lines of one event block, already split on newline
 * @return the populated event (event type defaults to "message")
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder eventData = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx < 0) {
            // Field with no value; nothing we track.
            continue;
        } else if (idx == 0) {
            // A leading ':' marks a comment line.
            event.setComment(line.substring(1).trim());
            continue;
        }
        String field = line.substring(0, idx);
        String value = line.substring(idx + 1).trim();
        switch (field.trim().toLowerCase()) {
            case "event":
                event.setEvent(value);
                continue;
            case "data":
                if (eventData.length() > 0) {
                    eventData.append("\n");
                }
                eventData.append(value);
                continue;
            case "id":
                // Only accept numeric ids; a malformed id previously threw
                // NumberFormatException and aborted the whole stream.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setId(Long.parseLong(value));
                }
                continue;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setRetryAfter(Long.parseLong(value));
                }
                continue;
            default:
                // Unknown field names are ignored per the SSE specification.
                continue;
        }
    }
    if (event.getEvent() == null) {
        event.setEvent(DEFAULT_EVENT);
    }
    if (eventData.length() != 0) {
        event = event.setData(eventData.toString());
    }
    return event;
}
/**
 * Retries the request if the listener allows it; otherwise reports the failure
 * to the listener.
 *
 * @param retrySSEResult the result of the failed read
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            // Let the server resume the stream from where it broke off.
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException ignored) {
            // Restore the interrupt status so callers further up the stack can
            // observe it; the previous code silently swallowed the interrupt.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Immutable holder describing a failed SSE read: the triggering exception plus
 * the last-seen event id and the server-requested retry delay.
 */
private static class RetrySSEResult {
    private final IOException ioException;
    private final long lastEventId;
    private final long retryAfter;

    public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }

    /** Id of the last event dispatched before the failure, or -1 when none. */
    public long getLastEventId() {
        return lastEventId;
    }

    /** Retry delay requested by the server, in milliseconds. */
    public long getRetryAfter() {
        return retryAfter;
    }

    /** The exception that interrupted the stream. */
    public IOException getException() {
        return ioException;
    }
}
/**
 * Minimal raw-socket HTTP client used for PATCH requests, which
 * HttpURLConnection cannot issue.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the
     * connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output
     * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
     * from the input stream. Follows a single Location redirect when present.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (
            BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending
     * over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ")
            .append(httpRequest.getUrl().getPath())
            .append(HTTP_VERSION)
            .append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList().forEach(value -> request.append(header.getName())
                    .append(": ")
                    .append(value)
                    .append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n")
                .append(httpRequest.getBody().toString())
                .append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information
     * needed to construct an instance of HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Server closed the connection before sending a status line; the
            // previous code dereferenced null here.
            throw new ProtocolException("Connection closed before a status line was received.");
        }
        // Status line looks like "HTTP/1.1 200 OK"; the code sits 3 chars past the dot.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int separator = line.indexOf(':');
            if (separator < 0) {
                // Malformed header line without a colon; the previous split(": ", 2)
                // threw ArrayIndexOutOfBoundsException on such lines.
                continue;
            }
            String k = line.substring(0, separator);
            String v = line.substring(separator + 1).trim();
            headers.add(HeaderName.fromString(k), v);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode with an explicit charset; the no-arg getBytes() uses the platform default.
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a DefaultHttpClient with the given timeouts and proxy configuration.
 * A null timeout is stored as -1, meaning "not configured".
 *
 * @param connectionTimeout connect timeout, or null to use the JDK default
 * @param readTimeout read timeout, or null to use the JDK default
 * @param proxyOptions optional proxy configuration; may be null
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    if (connectionTimeout == null) {
        this.connectionTimeout = -1;
    } else {
        this.connectionTimeout = connectionTimeout.toMillis();
    }
    if (readTimeout == null) {
        this.readTimeout = -1;
    } else {
        this.readTimeout = readTimeout.toMillis();
    }
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * <p>PATCH requests are routed through a raw-socket client because
 * {@link HttpURLConnection} does not support the PATCH method; every other
 * method goes through connect/sendBody/receiveResponse in that order.</p>
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    HttpURLConnection connection = connect(httpRequest);
    sendBody(httpRequest, connection);
    return receiveResponse(httpRequest, connection);
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException (logged as a warning) if the socket I/O fails
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Wrap as unchecked so the HttpClient interface's signature is preserved.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode credentials with an explicit charset; the no-arg getBytes()
                    // uses the platform default, which is not guaranteed to be UTF-8.
                    String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 is the sentinel for "timeout not configured"; see the constructor.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>GET and HEAD requests never carry a body; PATCH is handled by the socket
 * client before this method is reached, so an unrecognized method here is a
 * programming error.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (httpRequest.getHttpMethod()) {
        case GET:
        case HEAD:
            // Bodies are ignored for these methods.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Streams a {@code text/event-stream} response body, dispatching parsed
 * server-sent events to the listener and retrying the request when the
 * listener permits it.
 *
 * <p>(The previous javadoc here described {@code receiveResponse} and declared
 * a return value although this method is void.)</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection the stream is read from
 * @param listener the server-sent-event listener attached to the request
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // SSE streams are defined to be UTF-8 encoded.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Determines whether the response should be treated as a server-sent-event stream.
 *
 * @param responseHeaders headers received from the server
 * @return true when a Content-Type header exists and equals text/event-stream
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Look the header up once instead of twice.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Reads the SSE stream line by line, dispatching each complete event block
 * (terminated by a blank line) to the listener.
 *
 * @param reader The BufferedReader over the response body
 * @param listener The listener object attached with the httpRequest
 * @return retry information if the stream failed mid-read, or null on clean completion
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            collectedData.append(line).append("\n");
            // A blank line ("\n\n" in the buffer) marks the end of one event block.
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                // Skip dispatch for empty default events (no explicit type and no data).
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Report the id/retry hints of the last parsed event (if any) so the caller can retry.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
    }
    return null;
}
/**
 * Reports whether the collected data already contains the blank-line
 * terminator ("\n\n") that ends an SSE event block.
 *
 * @param sb the data collected so far
 * @return true once a complete block is buffered
 */
private boolean isEndOfBlock(StringBuilder sb) {
    int separatorIndex = sb.indexOf("\n\n");
    return separatorIndex >= 0;
}
/**
 * Parses the accumulated lines of one SSE event block into a ServerSentEvent.
 * Recognized fields are "event", "data", "id" and "retry"; lines starting with
 * ':' are comments; unknown fields and blank lines are ignored.
 *
 * @param lines the lines of one event block, already split on newline
 * @return the populated event (event type defaults to "message")
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A leading ':' marks a comment line.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // When no colon is present the whole line is the field name and the value is
        // empty. (The previous code used lines.length -- the ARRAY length -- as the
        // substring bound, truncating or overflowing the field name.)
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                // The SSE specification requires unknown fields (and blank lines) to be
                // ignored; throwing here aborted the whole stream on harmless input.
                break;
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it; otherwise reports the failure
 * to the listener.
 *
 * @param retrySSEResult the result of the failed read
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Let the server resume the stream from where it broke off.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // Restore the interrupt status so callers further up the stack can observe
        // it; the previous code silently swallowed the interrupt.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Copies every named header from the connection into a Headers collection.
 *
 * @param connection the connection the response was read from
 * @return the collected response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers result = new Headers(rawHeaders.size());
    // The status line is reported under a null key; skip it.
    rawHeaders.forEach((name, values) -> {
        if (name != null) {
            result.add(HeaderName.fromString(name), values);
        }
    });
    return result;
}
/**
 * Drains the connection's error stream (or, when no error stream exists, its
 * regular input stream) into an in-memory buffer.
 *
 * @param connection the connection whose response body should be read
 * @return a buffer containing every byte the server sent
 * @throws IOException if reading from the connection fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream drained = new AccessibleByteArrayOutputStream();
    // Read from the error stream when present; otherwise use the normal body stream.
    try (InputStream maybeError = connection.getErrorStream();
        InputStream body = (maybeError != null) ? maybeError : connection.getInputStream()) {
        byte[] scratch = new byte[8192];
        int count = body.read(scratch);
        while (count != -1) {
            drained.write(scratch, 0, count);
            count = body.read(scratch);
        }
    }
    return drained;
}
/**
 * Inner class to hold the result for a retry of an SSE request: the exception
 * that interrupted the stream plus the last-seen event id and the
 * server-requested retry delay.
 */
private static class RetrySSEResult {
    // Id of the last event dispatched before the failure; -1 when none was seen.
    private final long lastEventId;
    // Delay the server asked for before reconnecting; null when it never sent one.
    private final Duration retryAfter;
    // The I/O failure that interrupted the stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public Duration getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
/**
 * Minimal raw-socket HTTP client used for PATCH requests, which
 * HttpURLConnection cannot issue.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream.
     * Follows a single Location redirect when present.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Server closed the connection before sending a status line; the previous
            // code dereferenced null here.
            throw new ProtocolException("Connection closed before a status line was received.");
        }
        // Status line looks like "HTTP/1.1 200 OK"; the code sits 3 chars past the dot.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            if (split < 0) {
                // Malformed header line without a colon; previously substring(0, -1)
                // threw StringIndexOutOfBoundsException on such lines.
                continue;
            }
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode with an explicit charset; the no-arg getBytes() uses the platform default.
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
This method is called only when the errorStream is not null in the `receiveResponse` method, so there is no need to check it for null again here. | private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
} | InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) { | private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a DefaultHttpClient.
 *
 * @param connectionTimeout connect timeout, or null to use the JDK default
 * @param readTimeout read timeout, or null to use the JDK default
 * @param proxyOptions optional proxy configuration; may be null
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    // -1 is the sentinel for "timeout not configured"; see connect().
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // HttpURLConnection cannot issue PATCH requests, so those go over a raw socket.
    if (httpRequest.getHttpMethod() != HttpMethod.PATCH) {
        HttpURLConnection connection = connect(httpRequest);
        sendBody(httpRequest, connection);
        return receiveResponse(httpRequest, connection);
    }
    return sendPatchViaSocket(httpRequest);
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException (logged as a warning) if the socket I/O fails
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Wrap as unchecked so the HttpClient interface's signature is preserved.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode credentials with an explicit charset; the no-arg getBytes()
                    // uses the platform default, which is not guaranteed to be UTF-8.
                    String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 is the sentinel for "timeout not configured"; see the constructor.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // UncheckedIOException (a RuntimeException subclass) keeps existing catch
        // sites working while matching how the rest of this class wraps I/O failures.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>GET and HEAD requests never carry a body; PATCH is handled by the socket
 * client before this method is reached, so an unrecognized method here is a
 * programming error.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (httpRequest.getHttpMethod()) {
        case GET:
        case HEAD:
            // Bodies are ignored for these methods.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new RuntimeException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server.
 *
 * <p>Successful {@code text/event-stream} responses are streamed to the
 * request's listener; error responses have their body read from the error
 * stream.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        RetrySSEResult retrySSEResult;
        if (connection.getErrorStream() == null) {
            if (isTextEventStream(responseHeaders) && listener != null) {
                // SSE streams are defined to be UTF-8; also close the reader so the
                // underlying stream is not leaked (the previous code never closed it).
                try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
                    retrySSEResult = processBuffer(reader, listener);
                    if (retrySSEResult != null) {
                        retryExceptionForSSE(retrySSEResult, listener, httpRequest);
                    }
                } catch (IOException e) {
                    throw LOGGER.logThrowableAsError(new RuntimeException(e));
                }
            }
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            // Error responses carry their body on the error stream.
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Copies every named header from the connection into a Headers collection.
 *
 * @param connection the connection the response was read from
 * @return the collected response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    // Size for the expected entry count at the default 0.75 load factor.
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Iterate the map we already fetched; the previous code redundantly called
    // connection.getHeaderFields() a second time.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        // The status line is reported under a null key; skip it.
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Checks whether the response declares a {@code text/event-stream} content type.
 *
 * @param responseHeaders the headers returned by the server
 * @return true if a Content-Type header is present and equals text/event-stream
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Guard against responses with no Content-Type header; the previous code
    // dereferenced the header unconditionally and threw NullPointerException.
    return responseHeaders.get(HeaderName.CONTENT_TYPE) != null
        && responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Reads the SSE stream character by character and dispatches each complete event
 * block (terminated by a blank line, i.e. "\n\n") to the listener.
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information if the stream failed mid-read, or null on clean completion
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                // Drop the consumed block plus its two-newline terminator.
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Skip dispatch for empty default events (no explicit type and no data).
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // If the stream failed before any event was parsed, 'event' is still null;
        // report "no id / no retry delay" instead of dereferencing null (previous NPE).
        return new RetrySSEResult(e, event == null ? -1 : event.getId(),
            event == null ? -1 : event.getRetryAfter());
    }
    return null;
}
/**
 * Locates the blank-line separator ("\n\n") that terminates an SSE event block.
 *
 * @param sb the data collected so far
 * @return the index of the separator, or -1 when no complete block is buffered yet
 */
private int isEndOfBlock(StringBuilder sb) {
    final String terminator = "\n\n";
    return sb.indexOf(terminator);
}
/**
 * Parses the lines of one SSE event block into a ServerSentEvent.
 * Recognized fields are "event", "data", "id" and "retry"; lines starting with
 * ':' are comments; anything else is ignored per the SSE specification.
 *
 * @param lines the lines of one event block, already split on newline
 * @return the populated event (event type defaults to "message")
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder eventData = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx < 0) {
            // Field with no value; nothing we track.
            continue;
        } else if (idx == 0) {
            // A leading ':' marks a comment line.
            event.setComment(line.substring(1).trim());
            continue;
        }
        String field = line.substring(0, idx);
        String value = line.substring(idx + 1).trim();
        switch (field.trim().toLowerCase()) {
            case "event":
                event.setEvent(value);
                continue;
            case "data":
                if (eventData.length() > 0) {
                    eventData.append("\n");
                }
                eventData.append(value);
                continue;
            case "id":
                // Only accept numeric ids; a malformed id previously threw
                // NumberFormatException and aborted the whole stream.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setId(Long.parseLong(value));
                }
                continue;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setRetryAfter(Long.parseLong(value));
                }
                continue;
            default:
                // Unknown field names are ignored per the SSE specification.
                continue;
        }
    }
    if (event.getEvent() == null) {
        event.setEvent(DEFAULT_EVENT);
    }
    if (eventData.length() != 0) {
        event = event.setData(eventData.toString());
    }
    return event;
}
/**
 * Retries the request if the listener allows it; otherwise reports the failure
 * to the listener.
 *
 * @param retrySSEResult the result of the failed read
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            // Let the server resume the stream from where it broke off.
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException ignored) {
            // Restore the interrupt status so callers further up the stack can
            // observe it; the previous code silently swallowed the interrupt.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Inner class to hold the result for a retry of an SSE request: the exception
 * that interrupted the stream plus the last-seen event id and the
 * server-requested retry delay.
 */
private static class RetrySSEResult {
    // Id of the last event dispatched before the failure; -1 when none was seen.
    private final long lastEventId;
    // Delay (milliseconds) the server asked for before reconnecting.
    private final long retryAfter;
    // The I/O failure that interrupted the stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public long getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
    private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
    // Milliseconds; -1 means "not configured" and the JDK default is used.
    private final long connectionTimeout;
    // Milliseconds; -1 means "not configured" and the JDK default is used.
    private final long readTimeout;
    // Optional proxy; null means a direct connection.
    private final ProxyOptions proxyOptions;
    // SSE reconnect header carrying the id of the last event delivered.
    private static final String LAST_EVENT_ID = "Last-Event-Id";
    // Event name used when an SSE block does not specify one.
    private static final String DEFAULT_EVENT = "message";
    // Accepts only all-digit values for the SSE "retry" field.
    private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
    /**
     * Creates the client.
     *
     * @param connectionTimeout connect timeout, or null to use the JDK default
     * @param readTimeout read timeout, or null to use the JDK default
     * @param proxyOptions proxy configuration, or null for a direct connection
     */
    DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
        this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
        this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
        this.proxyOptions = proxyOptions;
    }
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
    /**
     * Receive the response from the remote server
     *
     * <p>Server-sent-event responses are dispatched to the request's listener and the
     * returned response carries no body; all other responses are fully buffered.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param connection The HttpURLConnection being sent to
     * @return A HttpResponse object
     */
    private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
        try {
            int responseCode = connection.getResponseCode();
            Headers responseHeaders = getResponseHeaders(connection);
            ServerSentEventListener listener = httpRequest.getServerSentEventListener();
            // Only treat as a stream on success (no error stream) with the SSE content type.
            if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
                if (listener != null) {
                    processTextEventStream(httpRequest, connection, listener);
                } else {
                    LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                        " http request. Treating response as regular response.");
                }
                // Streaming responses return without a buffered body.
                return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
            } else {
                // Buffer the full (error or regular) body into memory.
                AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
                return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                    BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
            }
        } catch (IOException e) {
            throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
        } finally {
            // Always release the connection, even when reading the response fails.
            connection.disconnect();
        }
    }
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
    /**
     * Processes the sse buffer and dispatches the event
     *
     * <p>Lines are accumulated until a blank line (event-block delimiter) is seen,
     * then the block is parsed and dispatched. Returns a retry descriptor when the
     * stream dies mid-read, or null when it closed cleanly.</p>
     *
     * @param reader The BufferedReader object
     * @param listener The listener object attached with the httpRequest
     * @return retry information for a failed read, or null on clean end-of-stream
     */
    private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
        StringBuilder collectedData = new StringBuilder();
        ServerSentEvent event = null;
        try {
            String line;
            while ((line = reader.readLine()) != null) {
                collectedData.append(line).append("\n");
                // "\n\n" can only appear right after an empty line was appended, so the
                // buffer ends exactly at the block delimiter when this fires.
                if (isEndOfBlock(collectedData)) {
                    event = processLines(collectedData.toString().split("\n"));
                    // Suppress empty keep-alive blocks: default event name and no data.
                    if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                        listener.onEvent(event);
                    }
                    collectedData = new StringBuilder();
                }
            }
            listener.onClose();
        } catch (IOException e) {
            // Hand back what is needed to resume: the cause, the last delivered event id,
            // and the server-requested retry delay (if any event was seen).
            return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
        }
        return null;
    }
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
How does BufferedReader.readLine() work when there are multiple line returns in a row? And are we better off tracking how many empty lines in a row we receive instead of this? | private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
} | return sb.indexOf("\n\n") >= 0; | private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
    /**
     * Receive the response from the remote server
     *
     * <p>Server-sent-event responses are dispatched to the request's listener and the
     * returned response carries no body; all other responses are fully buffered.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param connection The HttpURLConnection being sent to
     * @return A HttpResponse object
     */
    private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
        try {
            int responseCode = connection.getResponseCode();
            Headers responseHeaders = getResponseHeaders(connection);
            ServerSentEventListener listener = httpRequest.getServerSentEventListener();
            // Only treat as a stream on success (no error stream) with the SSE content type.
            if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
                if (listener != null) {
                    processTextEventStream(httpRequest, connection, listener);
                } else {
                    LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                        " http request. Treating response as regular response.");
                }
                // Streaming responses return without a buffered body.
                return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
            } else {
                // Buffer the full (error or regular) body into memory.
                AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
                return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                    BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
            }
        } catch (IOException e) {
            throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
        } finally {
            // Always release the connection, even when reading the response fails.
            connection.disconnect();
        }
    }
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
    /**
     * Processes the sse buffer and dispatches the event
     *
     * <p>Lines are accumulated until a blank line (event-block delimiter) is seen,
     * then the block is parsed and dispatched. Returns a retry descriptor when the
     * stream dies mid-read, or null when it closed cleanly.</p>
     *
     * @param reader The BufferedReader object
     * @param listener The listener object attached with the httpRequest
     * @return retry information for a failed read, or null on clean end-of-stream
     */
    private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
        StringBuilder collectedData = new StringBuilder();
        ServerSentEvent event = null;
        try {
            String line;
            while ((line = reader.readLine()) != null) {
                collectedData.append(line).append("\n");
                // "\n\n" can only appear right after an empty line was appended, so the
                // buffer ends exactly at the block delimiter when this fires.
                if (isEndOfBlock(collectedData)) {
                    event = processLines(collectedData.toString().split("\n"));
                    // Suppress empty keep-alive blocks: default event name and no data.
                    if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                        listener.onEvent(event);
                    }
                    collectedData.setLength(0);
                }
            }
            listener.onClose();
        } catch (IOException e) {
            // Hand back what is needed to resume: the cause, the last delivered event id,
            // and the server-requested retry delay (if any event was seen).
            return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : null);
        }
        return null;
    }
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
} else if (idx < 0) {
throw new IllegalArgumentException("Invalid data received from server");
}
String field = line.substring(0, idx).trim().toLowerCase();
String value = line.substring(idx + 1).trim();
switch (field) {
case "event":
event.setEvent(value);
break;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
break;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
event.setEvent(event.getEvent() == null ? DEFAULT_EVENT : event.getEvent());
if (eventData.length() != 0) {
event.setData(eventData.toString());
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
        /**
         * Opens a socket connection, then writes the PATCH request across the connection and reads the response
         *
         * <p>Each call opens a fresh socket (plain or TLS depending on the URL scheme);
         * try-with-resources guarantees it is closed once the response is read.</p>
         *
         * @param httpRequest The HTTP Request being sent
         * @return an instance of HttpUrlConnectionResponse
         * @throws ProtocolException If the protocol is not HTTP or HTTPS
         * @throws IOException If an I/O error occurs
         */
        public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
            final URL requestUrl = httpRequest.getUrl();
            final String protocol = requestUrl.getProtocol();
            final String host = requestUrl.getHost();
            final int port = requestUrl.getPort();
            switch (protocol) {
                case "https":
                    // TLS socket from the default factory.
                    try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                        return doInputOutput(httpRequest, socket);
                    }
                case "http":
                    // Plain-text socket.
                    try (Socket socket = new Socket(host, port)) {
                        return doInputOutput(httpRequest, socket);
                    }
                default:
                    throw LOGGER.logThrowableAsWarning(
                        new ProtocolException("Only HTTP and HTTPS are supported by this client."));
            }
        }
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Timeouts in milliseconds; -1 means "not configured" and leaves the JDK defaults in place.
private final long connectionTimeout;
private final long readTimeout;
// Optional proxy configuration; null means a direct connection.
private final ProxyOptions proxyOptions;
// Request header used to resume an SSE stream from the last delivered event on retry.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Event type used when an SSE block carries no explicit 'event:' field.
private static final String DEFAULT_EVENT = "message";
// Validates the SSE 'retry:' field, which must be all digits.
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously sends the HttpRequest and returns the resulting response.
 *
 * <p>PATCH is routed through the socket-based client; every other method goes through
 * {@code HttpURLConnection}.</p>
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    HttpResponse response;
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        response = sendPatchViaSocket(httpRequest);
    } else {
        HttpURLConnection connection = connect(httpRequest);
        sendBody(httpRequest, connection);
        response = receiveResponse(httpRequest, connection);
    }
    return response;
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * <p>PATCH is special-cased in {@code send(HttpRequest)} because {@code HttpURLConnection} does
 * not support the PATCH verb.</p>
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Surface I/O failures unchecked; the cause is preserved for callers.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 * @throws UncheckedIOException if opening the connection fails, or if a proxy is configured
 * without a valid address
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                // Encode with an explicit charset; bare getBytes() depended on the platform
                // default and could garble non-ASCII credentials.
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Writes the request body, if any, to the connection's output stream.
 *
 * <p>GET and HEAD never carry a body; every other supported method streams the body out. The
 * output stream is only opened when a body is actually present.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream bodyStream = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(bodyStream);
                bodyStream.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server.
 *
 * <p>Successful {@code text/event-stream} responses are streamed to the request's
 * {@link ServerSentEventListener} (when one is attached) and returned without a body; all other
 * responses are fully buffered.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Only a non-error text/event-stream response is treated as SSE; error responses fall
        // through to the buffered path below so callers can inspect the error body.
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
            if (listener != null) {
                processTextEventStream(httpRequest, connection, listener);
            } else {
                LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                    " http request. Treating response as regular response.");
            }
            // SSE responses carry no buffered body; the events were already dispatched.
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Streams a text/event-stream response body to the given listener, retrying the request when the
 * stream fails mid-read and the listener permits it.
 *
 * @param httpRequest the request that produced the event stream
 * @param connection the open connection to read events from
 * @param listener the listener that receives parsed events
 * @throws UncheckedIOException if opening or closing the stream fails
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Use the Charset overload rather than the "UTF-8" String name: it is checked at compile
    // time and cannot throw UnsupportedEncodingException.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Returns whether the response declares a {@code text/event-stream} Content-Type.
 *
 * @param responseHeaders the response headers to inspect
 * @return true when the Content-Type header is present and equals text/event-stream
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Look the header up once instead of twice.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    if (contentType == null) {
        return false;
    }
    return contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Processes the sse buffer and dispatches the event
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information when the stream failed mid-read, or {@code null} when the stream
 * completed normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            collectedData.append(line).append("\n");
            // A blank line terminates an SSE event block; parse and dispatch it.
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                // Blocks that carry neither a non-default event type nor data (e.g. comment-only
                // keep-alives) are not dispatched.
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // 'event' is null when the failure happened before the first complete block; report -1 /
        // no retry delay in that case.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
    }
    return null;
}
/**
 * Parses the lines of one SSE event block into a {@link ServerSentEvent}.
 *
 * <p>Per the SSE format: a line starting with ':' is a comment, a line without ':' is a field
 * name with an empty value, and recognized fields are event/data/id/retry. The event type
 * defaults to {@code "message"} when no {@code event:} field was present.</p>
 *
 * @param lines the lines of a single event block (without the terminating blank line)
 * @return the parsed event
 * @throws IllegalArgumentException if a line names an unrecognized field
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // Lines beginning with ':' are comments.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // A line without ':' is a field name spanning the whole LINE. The previous code used
        // lines.length (the ARRAY length) as the substring end, which could throw
        // StringIndexOutOfBoundsException or truncate the field name.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * <p>When the retry proceeds, the last delivered event ID (if any) is echoed back via the
 * Last-Event-Id header and the server-requested retry delay is honored before re-sending.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // Restore the interrupt flag so code further up the stack can observe the interruption;
        // the previous code swallowed it entirely.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Collects the connection's named response headers into a {@link Headers} instance.
 *
 * @param connection the connection the response was read from
 * @return the response headers; entries with a {@code null} key (the status line pseudo-header)
 * are skipped
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> connectionHeaders = connection.getHeaderFields();
    Headers result = new Headers(connectionHeaders.size());
    connectionHeaders.forEach((name, values) -> {
        if (name != null) {
            result.add(HeaderName.fromString(name), values);
        }
    });
    return result;
}
/**
 * Reads the full response body (the error stream when one exists, otherwise the input stream)
 * into an in-memory buffer.
 *
 * @param connection the connection to drain
 * @return the buffered response body
 * @throws IOException if reading fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream buffered = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream != null) ? errorStream : connection.getInputStream()) {
        byte[] chunk = new byte[8192];
        for (int count = source.read(chunk); count != -1; count = source.read(chunk)) {
            buffered.write(chunk, 0, count);
        }
    }
    return buffered;
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // ID of the last successfully parsed event, or -1 when none was seen before the failure;
    // echoed back to the server via the Last-Event-Id header when the request is retried.
    private final long lastEventId;
    // Server-requested retry delay (SSE 'retry:' field); null when the server never sent one.
    private final Duration retryAfter;
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public Duration getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
/**
 * Minimal socket-based HTTP client used for PATCH requests, which {@code HttpURLConnection}
 * does not support.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream. Any redirect
     * indicated by a Location response header is followed (relative locations are resolved against the
     * request URL).
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        // The response is decoded as UTF-8, so encode the request as UTF-8 as well instead of
        // relying on the platform default charset.
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Server closed the connection without a status line; previously this dereferenced
            // null and threw NullPointerException instead of a meaningful IOException.
            throw new IOException("Connection closed before an HTTP status line was received.");
        }
        // The status code is the three characters following "HTTP/1.x ", located relative to the
        // '.' in the HTTP version token.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode with an explicit charset; the bare getBytes() call used the platform default,
        // which is inconsistent with the UTF-8 reader above.
        BinaryData body
            = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
Throw `UncheckedIOException` when wrapping IOException. | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | throw LOGGER.logThrowableAsError(new RuntimeException(e)); | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Request header used to resume an SSE stream from the last delivered event on retry.
public static final String LAST_EVENT_ID = "Last-Event-Id";
// Timeouts in milliseconds; -1 means "not configured" and leaves the JDK defaults in place.
private final long connectionTimeout;
private final long readTimeout;
// Optional proxy configuration; null means a direct connection.
private final ProxyOptions proxyOptions;
// Event type used when an SSE block carries no explicit 'event:' field.
private static final String DEFAULT_EVENT = "message";
// Compiled once per class: the pattern is constant, and Pattern compilation is comparatively
// expensive. (Was a per-instance field, recompiled for every client instance.)
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // PATCH is not supported by HttpURLConnection, so it is routed through the raw socket client.
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    HttpURLConnection connection = connect(httpRequest);
    sendBody(httpRequest, connection);
    return receiveResponse(httpRequest, connection);
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * <p>PATCH is special-cased in {@code send(HttpRequest)} because {@code HttpURLConnection} does
 * not support the PATCH verb.</p>
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Surface I/O failures unchecked; the cause is preserved for callers.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 * @throws UncheckedIOException if opening the connection fails, or if a proxy is configured
 * without a valid address
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                // Encode with an explicit charset; bare getBytes() depended on the platform
                // default and could garble non-ASCII credentials.
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // Wrap as UncheckedIOException (a RuntimeException subclass, so existing callers that
        // catch RuntimeException keep working) instead of a bare RuntimeException.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>GET and HEAD requests never carry a body; every other supported method streams the body
 * out. The output stream is only opened when a body is actually present.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                // Wrap as UncheckedIOException (a RuntimeException subclass) rather than a bare
                // RuntimeException, preserving the cause for callers.
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Extracts the response headers from the connection.
 *
 * <p>Entries with a {@code null} key (the status line pseudo-header) are skipped. (The previous
 * javadoc here described {@code receiveResponse}, not this method.)</p>
 *
 * @param connection The HttpURLConnection the response was read from
 * @return the response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Iterate the map already fetched above instead of calling getHeaderFields() a second time.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Returns whether the response declares a {@code text/event-stream} Content-Type.
 *
 * @param responseHeaders the response headers to inspect
 * @return true when the Content-Type header is present and equals text/event-stream
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    // Guard against a missing Content-Type header, which previously caused a
    // NullPointerException on responses without one.
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Reads the full response body (the error stream when one exists, otherwise the input stream)
 * into an in-memory buffer.
 *
 * @param connection the connection to drain
 * @return the buffered response body
 * @throws IOException if reading fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream buffered = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream != null) ? errorStream : connection.getInputStream()) {
        byte[] chunk = new byte[8192];
        for (int count = source.read(chunk); count != -1; count = source.read(chunk)) {
            buffered.write(chunk, 0, count);
        }
    }
    return buffered;
}
/**
 * Reads the SSE stream character by character, dispatching each complete event block
 * (terminated by a blank line) to the listener.
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information when the stream failed mid-read, or {@code null} when the stream
 * completed normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Comment-only keep-alive blocks (default event type and no data) are not
                // dispatched.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // 'event' is still null when the failure happens before the first complete block was
        // parsed; previously this dereferenced it unconditionally and could throw
        // NullPointerException instead of reporting the retry.
        return new RetrySSEResult(e, event != null ? event.getId() : -1,
            event != null ? event.getRetryAfter() : -1);
    }
    return null;
}
// Returns the index of the first blank line ("\n\n") that terminates a complete SSE event
// block, or -1 when the buffer does not yet contain a complete block.
private int isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n");
}
/**
 * Parses the lines of one SSE event block into a {@link ServerSentEvent}.
 *
 * <p>Lines starting with ':' are comments, lines without ':' are skipped, and the recognized
 * fields are event/data/id/retry; unrecognized fields are ignored. The event type defaults to
 * {@code "message"} when no {@code event:} field was present.</p>
 *
 * @param lines the lines of a single event block
 * @return the parsed event
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder dataBuffer = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int colon = line.indexOf(':');
        if (colon < 0) {
            // No field separator on this line; nothing to dispatch on.
            continue;
        }
        if (colon == 0) {
            // A leading ':' marks a comment line.
            event.setComment(line.substring(1).trim());
            continue;
        }
        String field = line.substring(0, colon).trim().toLowerCase();
        String value = line.substring(colon + 1).trim();
        if ("event".equals(field)) {
            event.setEvent(value);
        } else if ("data".equals(field)) {
            if (dataBuffer.length() > 0) {
                dataBuffer.append("\n");
            }
            dataBuffer.append(value);
        } else if ("id".equals(field)) {
            if (!value.isEmpty()) {
                event.setId(Long.parseLong(value));
            }
        } else if ("retry".equals(field)) {
            if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                event.setRetryAfter(Long.parseLong(value));
            }
        }
        // Any other field name is silently ignored.
    }
    if (event.getEvent() == null) {
        event.setEvent(DEFAULT_EVENT);
    }
    if (dataBuffer.length() != 0) {
        event = event.setData(dataBuffer.toString());
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * <p>When the retry proceeds, the last delivered event ID (if any) is echoed back via the
 * Last-Event-Id header and the server-requested retry delay is honored before re-sending.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException ignored) {
            // Restore the interrupt flag so code further up the stack can observe the
            // interruption; the previous code swallowed it entirely.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // ID of the last successfully parsed event, or -1 when none was seen before the failure;
    // echoed back via the Last-Event-Id header when the request is retried.
    private final long lastEventId;
    // Server-requested retry delay in milliseconds; values <= 0 mean no delay is applied.
    private final long retryAfter;
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public long getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
 * Opens a socket connection, then writes the PATCH request across the
 * connection and reads the response
 *
 * @param httpRequest The HTTP Request being sent
 * @return an instance of HttpUrlConnectionResponse
 * @throws ProtocolException If the protocol is not HTTP or HTTPS
 * @throws IOException If an I/O error occurs
 */
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
    final URL requestUrl = httpRequest.getUrl();
    final String protocol = requestUrl.getProtocol();
    final String host = requestUrl.getHost();
    final int port = requestUrl.getPort();
    // Each case returns from inside try-with-resources so the socket is closed on exit.
    switch (protocol) {
        case "https":
            try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                return doInputOutput(httpRequest, socket);
            }
        case "http":
            try (Socket socket = new Socket(host, port)) {
                return doInputOutput(httpRequest, socket);
            }
        default:
            throw LOGGER.logThrowableAsWarning(
                new ProtocolException("Only HTTP and HTTPS are supported by this client."));
    }
}
/**
 * Calls buildAndSend to send a String representation of the request across the output
 * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
 * from the input stream. When the response carries a Location header, the redirect is
 * followed by re-sending the request to the new URL (relative locations are resolved
 * against the current request URL).
 *
 * @param httpRequest The HTTP Request being sent
 * @param socket An instance of the SocketClient
 * @return an instance of HttpUrlConnectionResponse
 */
// NOTE(review): presumably suppresses the deprecation warning for setUrl(String) — confirm.
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
    httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
    if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
        httpRequest.setHeader(HeaderName.CONNECTION, "close");
    }
    try (
        BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
        OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
        buildAndSend(httpRequest, out);
        DefaultHttpClientResponse response = buildResponse(httpRequest, in);
        Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
        String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
        if (redirectLocation != null) {
            if (redirectLocation.startsWith("http")) {
                httpRequest.setUrl(redirectLocation);
            } else {
                httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
            }
            // Follow the redirect by re-issuing the PATCH to the updated URL.
            return sendPatchRequest(httpRequest);
        }
        return response;
    }
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
I don't think we need to include the load factor (`/ 0.75f` ) here. This should already be handled by the map implementation in Headers. | private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
} | Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); | private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
int dataRead = reader.read();
while (dataRead != -1) {
collectedData.append((char) dataRead);
dataRead = reader.read();
int index = isEndOfBlock(collectedData);
if (index >= 0) {
String[] lines = collectedData.substring(0, index).split("\n");
collectedData.delete(0, index + 2);
event = processLines(lines);
if (event != null
&& (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
listener.onEvent(event);
}
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event.getId(), event.getRetryAfter());
}
return null;
}
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
long lastEventId = retrySSEResult.getLastEventId();
long retryAfter = retrySSEResult.getRetryAfter();
if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
if (lastEventId != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
String.valueOf(lastEventId));
}
try {
if (retryAfter > 0) {
Thread.sleep(retryAfter);
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
} else {
listener.onError(retrySSEResult.getException());
}
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final long retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
This will throw a NPE if Content-Type header is not present in the response. | private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
} | return responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM); | private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
int dataRead = reader.read();
while (dataRead != -1) {
collectedData.append((char) dataRead);
dataRead = reader.read();
int index = isEndOfBlock(collectedData);
if (index >= 0) {
String[] lines = collectedData.substring(0, index).split("\n");
collectedData.delete(0, index + 2);
event = processLines(lines);
if (event != null
&& (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
listener.onEvent(event);
}
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event.getId(), event.getRetryAfter());
}
return null;
}
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
long lastEventId = retrySSEResult.getLastEventId();
long retryAfter = retrySSEResult.getRetryAfter();
if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
if (lastEventId != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
String.valueOf(lastEventId));
}
try {
if (retryAfter > 0) {
Thread.sleep(retryAfter);
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
} else {
listener.onError(retrySSEResult.getException());
}
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final long retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
// PATCH is tunneled through a raw socket client (HttpURLConnection rejects the
// PATCH verb); every other method goes through the HttpURLConnection path.
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
// Wrap as unchecked so send(...) does not need a throws clause.
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode with an explicit charset; the no-arg getBytes() uses the
                    // platform default and can corrupt non-ASCII credentials.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured"; leave the JDK defaults in place.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    // Switch on the local instead of re-querying the request so the value used in the
    // default-case error message is the same one that was dispatched on.
    switch (method) {
        case GET:
        case HEAD:
            // GET and HEAD requests never carry a body.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
// SSE streaming only happens for successful (no error stream) text/event-stream
// responses that have a listener attached; everything else is buffered.
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
// Events were consumed by the listener, so the returned response carries no body.
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
// Always release the connection, success or failure.
connection.disconnect();
}
}
// Reads an SSE (text/event-stream) response body, dispatching events to the listener
// and retrying the request if the listener asks for it after an I/O failure.
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // StandardCharsets.UTF_8 instead of the charset-name lookup: the constant avoids
    // the unreachable UnsupportedEncodingException path of the String overload.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
// Accumulate lines until a blank line (== "\n\n" in the buffer) ends an event block.
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
// Default "message" events with no data are comment/heartbeat blocks; skip them.
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
// Hand back enough state (last event id / retry hint) for a retry decision;
// both fall back to "absent" when the failure happened before any event.
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
// True when the buffer contains a blank line, i.e. a complete SSE event block.
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") != -1;
}
// Parses one SSE event block (the lines between blank-line separators) into a
// ServerSentEvent. Recognized fields are event/data/id/retry; a leading ':' marks a
// comment line; anything else is rejected as invalid server data.
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        // Blank lines only delimit blocks; never treat one as an (invalid) field.
        if (line.isEmpty()) {
            continue;
        }
        int idx = line.indexOf(':');
        if (idx == 0) {
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // A line without a colon is a field name with an empty value.
        // BUG FIX: the end index must be line.length(); the previous code used
        // lines.length (the number of lines in the block), which truncated the field
        // name or threw StringIndexOutOfBoundsException.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                // Per the SSE spec, only all-digit retry values are honored.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Ask the server to resume the stream from the last event that was seen.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can observe the interruption;
        // silently swallowing it loses the signal.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
// Copies the connection's response headers into a Headers collection, dropping the
// null-keyed entry (HttpURLConnection reports the status line under a null key).
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(rawHeaders.size());
    for (Map.Entry<String, List<String>> header : rawHeaders.entrySet()) {
        String name = header.getKey();
        if (name != null) {
            responseHeaders.add(HeaderName.fromString(name), header.getValue());
        }
    }
    return responseHeaders;
}
// Drains the response body into an in-memory buffer, preferring the error stream when
// the server returned an error status.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
// Copy in 8 KiB chunks until EOF.
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
// Id of the last successfully dispatched event; -1 when none was seen.
private final long lastEventId;
// Server-suggested retry delay; null when the server sent no hint.
private final Duration retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // URL.getPort() returns -1 when the URL carries no explicit port; fall back to
        // the protocol's default port (80/443) instead of opening a socket on -1.
        int port = requestUrl.getPort();
        if (port == -1) {
            port = requestUrl.getDefaultPort();
        }
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            // Follow a redirect when the server sent a Location header.
            // NOTE(review): no hop limit — a redirect loop recurses until the stack
            // overflows; confirm whether a cap should be added.
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // Always terminate the header section with a blank line. Previously the
        // terminator was only written when a body was present, producing a malformed
        // HTTP message for body-less PATCH requests.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            request.append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        // Status line such as "HTTP/1.1 200 OK": the status code begins three
        // characters after the '.' of the protocol version.
        String statusLine = reader.readLine();
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Use an explicit charset; no-arg getBytes() depends on the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
We should throw here if we get invalid data. | private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
} | continue; | private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Request header used to resume a server-sent event stream on retry.
public static final String LAST_EVENT_ID = "Last-Event-Id";
// Timeouts in milliseconds; -1 is the "unset" sentinel (the JDK default applies).
private final long connectionTimeout;
private final long readTimeout;
// Proxy configuration; null means a direct connection.
private final ProxyOptions proxyOptions;
// Per the SSE spec, events without an explicit type are dispatched as "message".
private static final String DEFAULT_EVENT = "message";
// static final: Pattern is immutable and thread-safe, so compile it once per class
// instead of once per client instance.
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
// Creates a DefaultHttpClient; null timeouts mean "use the JDK default" and are
// stored as the -1 sentinel, null proxyOptions means a direct connection.
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
// PATCH is tunneled through a raw socket client (HttpURLConnection rejects the
// PATCH verb); every other method goes through the HttpURLConnection path.
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
// Wrap as unchecked so send(...) does not need a throws clause.
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode with an explicit charset; the no-arg getBytes() uses the
                    // platform default and can corrupt non-ASCII credentials.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured"; leave the JDK defaults in place.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        // UncheckedIOException (still a RuntimeException) keeps the I/O cause typed,
        // matching how the rest of this client wraps IOExceptions.
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    // Switch on the local instead of re-querying the request so the value used in the
    // default-case error message is the same one that was dispatched on.
    switch (method) {
        case GET:
        case HEAD:
            // GET and HEAD requests never carry a body.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                // UncheckedIOException keeps the I/O cause typed instead of a bare
                // RuntimeException; callers catching RuntimeException still work.
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        RetrySSEResult retrySSEResult;
        if (connection.getErrorStream() == null) {
            if (isTextEventStream(responseHeaders) && listener != null) {
                // try-with-resources closes the reader (and underlying stream), which
                // was previously leaked; SSE payloads are UTF-8, so decode explicitly
                // instead of relying on the platform default charset.
                try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
                    retrySSEResult = processBuffer(reader, listener);
                    if (retrySSEResult != null) {
                        retryExceptionForSSE(retrySSEResult, listener, httpRequest);
                    }
                } catch (IOException e) {
                    throw LOGGER.logThrowableAsError(new RuntimeException(e));
                }
            }
            // For SSE the events were consumed by the listener; no body is returned.
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    } finally {
        // Always release the connection, success or failure.
        connection.disconnect();
    }
}
// Copies the connection's response headers into a Headers collection, dropping the
// null-keyed entry (HttpURLConnection reports the status line under a null key).
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    // Iterate the map that was already fetched instead of calling
    // connection.getHeaderFields() a second time.
    Headers responseHeaders = new Headers(hucHeaders.size());
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
// True when the response declares a text/event-stream Content-Type.
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    // Previously threw NullPointerException when the response had no Content-Type.
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
// Drains the response body into an in-memory buffer, preferring the error stream when
// the server returned an error status.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
// Copy in 8 KiB chunks until EOF.
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
 * Processes the sse buffer and dispatches the event
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            // A blank line ("\n\n") ends an event block.
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Default "message" events with no data are comment/heartbeat blocks.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // BUG FIX: an I/O failure before any complete event block leaves 'event' null;
        // previously this dereferenced it and threw NullPointerException. Fall back to
        // "no last event" (-1) and "no retry hint" (-1).
        return new RetrySSEResult(e, event != null ? event.getId() : -1,
            event != null ? event.getRetryAfter() : -1);
    }
    return null;
}
// Returns the index of the first blank line ("\n\n") separating SSE event blocks,
// or -1 when the buffer does not yet hold a complete block.
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
/**
 * Retries the request if the listener allows it
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            // Ask the server to resume the stream from the last event that was seen.
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers can observe the interruption;
            // silently swallowing it loses the signal.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
// Id of the last successfully dispatched event; -1 when none was seen.
private final long lastEventId;
// Server-suggested retry delay in milliseconds (used with Thread.sleep); <= 0 = none.
private final long retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the
     * connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // URL.getPort() returns -1 when the URL carries no explicit port; fall back to
        // the protocol's default port (80/443) instead of opening a socket on -1.
        int port = requestUrl.getPort();
        if (port == -1) {
            port = requestUrl.getDefaultPort();
        }
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output
     * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
     * from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (
            BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            // Follow a redirect when the server sent a Location header.
            // NOTE(review): no hop limit — a redirect loop recurses until the stack
            // overflows; confirm whether a cap should be added.
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending
     * over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ")
            .append(httpRequest.getUrl().getPath())
            .append(HTTP_VERSION)
            .append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList().forEach(value -> request.append(header.getName())
                    .append(": ")
                    .append(value)
                    .append("\r\n"));
            }
        }
        // Always terminate the header section with a blank line. Previously the
        // terminator was only written when a body was present, producing a malformed
        // HTTP message for body-less PATCH requests.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            request.append(httpRequest.getBody().toString())
                .append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information
     * needed to construct an instance of HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        // Status line such as "HTTP/1.1 200 OK": the status code begins three
        // characters after the '.' of the protocol version.
        String statusLine = reader.readLine();
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        // Split on the first ':' rather than the exact ": " sequence so headers sent
        // without a space after the colon still parse (previously kv[1] threw
        // ArrayIndexOutOfBoundsException for such headers).
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Use an explicit charset; no-arg getBytes() depends on the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Timeouts in milliseconds; -1 is the "unset" sentinel (the JDK default applies).
private final long connectionTimeout;
private final long readTimeout;
// Proxy configuration; null means a direct connection.
private final ProxyOptions proxyOptions;
// Request header used to resume a server-sent event stream on retry.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Per the SSE spec, events without an explicit type are dispatched as "message".
private static final String DEFAULT_EVENT = "message";
// Matches all-digit strings; used to validate SSE "retry" values before parsing.
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
// Creates a DefaultHttpClient; null timeouts mean "use the JDK default" and are
// stored as the -1 sentinel, null proxyOptions means a direct connection.
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
// PATCH is tunneled through a raw socket client (HttpURLConnection rejects the
// PATCH verb); every other method goes through the HttpURLConnection path.
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
// Wrap as unchecked so send(...) does not need a throws clause.
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
/**
 * Opens an {@link HttpURLConnection} for the request URL, applying proxy settings,
 * connect/read timeouts, the HTTP method and all request headers.
 *
 * @param httpRequest The HTTP Request being sent.
 * @return The configured, unconnected {@link HttpURLConnection}.
 * @throws UncheckedIOException If the connection cannot be opened.
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                // Use an explicit charset: the no-arg getBytes() depends on the platform
                // default and could mis-encode non-ASCII credentials.
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured"; leave the JDK defaults in place in that case.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            // addRequestProperty (not set) preserves multiple values per header name.
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
/**
 * Writes the request body, if any, to the connection's output stream.
 *
 * <p>GET and HEAD requests never send a body; any body set on them is silently ignored.</p>
 *
 * @param httpRequest The HTTP Request being sent.
 * @param connection The HttpURLConnection that is being sent to.
 * @throws UncheckedIOException If writing the body fails.
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    // Read the method once and switch on the local (the original re-queried the request).
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Only successful (no error stream) text/event-stream responses take the SSE path.
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
            if (listener != null) {
                // Consumes the input stream, dispatching events to the listener.
                processTextEventStream(httpRequest, connection, listener);
            } else {
                LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                    " http request. Treating response as regular response.");
            }
            // SSE responses carry no buffered body; events were already delivered above.
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            // Regular response: drain the input (or error) stream fully into memory.
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        // Always release the connection, even when reading the response failed.
        connection.disconnect();
    }
}
/**
 * Reads a text/event-stream response line by line, dispatching parsed events to the
 * listener and retrying the request when the listener permits it.
 *
 * @param httpRequest The HTTP Request being sent.
 * @param connection The open connection whose input stream carries the event stream.
 * @param listener The listener attached to the request; receives the parsed events.
 * @throws UncheckedIOException If opening or reading the stream fails.
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // StandardCharsets.UTF_8 replaces the "UTF-8" string literal: same charset, but no
    // checked UnsupportedEncodingException path and no charset-name lookup at runtime.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Determines whether the response declares a {@code text/event-stream} content type.
 *
 * @param responseHeaders The response headers to inspect.
 * @return true when a Content-Type header is present and equals text/event-stream.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    // Tracks the most recently parsed event so a failure can report its id/retry hint.
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            collectedData.append(line).append("\n");
            // A blank line (two consecutive '\n') terminates one SSE event block.
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                // Comment-only blocks (default event name, no data) are not dispatched.
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Stream broke mid-read: hand back enough context (last event id, retry hint)
        // for the caller to decide whether to retry. Both are guarded against a
        // failure occurring before any event was parsed.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
    }
    // null signals the stream completed normally; no retry needed.
    return null;
}
/**
 * Checks whether the accumulated data contains a complete SSE event block, which is
 * terminated by an empty line (two consecutive newlines).
 */
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") != -1;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
/**
 * Retries the SSE request if the listener allows it; otherwise reports the failure.
 *
 * <p>When the server supplied a last event id, it is echoed back via the
 * {@code Last-Event-Id} header so the stream can resume where it broke.</p>
 *
 * @param retrySSEResult the result of the failed read (exception, last id, retry delay)
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        // Honor the server-requested backoff before reconnecting.
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it so callers further up
        // the stack can observe the interruption.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Copies the connection's response headers into a {@link Headers} instance, skipping
 * the null key HttpURLConnection uses for the status line.
 *
 * @param connection The connection whose response headers are read.
 * @return The converted headers.
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(rawHeaders.size());
    rawHeaders.forEach((name, values) -> {
        if (name != null) {
            responseHeaders.add(HeaderName.fromString(name), values);
        }
    });
    return responseHeaders;
}
/**
 * Fully drains the connection's error stream (preferred, when present) or input stream
 * into an in-memory buffer.
 *
 * @param connection The connection whose response body is read.
 * @return The buffered response body.
 * @throws IOException If reading either stream fails.
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = inputStream.read(chunk); read != -1; read = inputStream.read(chunk)) {
            outputStream.write(chunk, 0, read);
        }
    }
    return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
/**
 * Immutable holder describing a failed SSE read: the triggering exception, the id of
 * the last event delivered before the failure (-1 when none) and the server-requested
 * retry delay (possibly null).
 */
private static class RetrySSEResult {
    private final IOException ioException;
    private final long lastEventId;
    private final Duration retryAfter;

    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }

    /** @return the I/O failure that interrupted the event stream. */
    public IOException getException() {
        return ioException;
    }

    /** @return the id of the last event received before the failure, or -1. */
    public long getLastEventId() {
        return lastEventId;
    }

    /** @return the retry delay requested by the server, or null when none was given. */
    public Duration getRetryAfter() {
        return retryAfter;
    }
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
    final URL requestUrl = httpRequest.getUrl();
    final String protocol = requestUrl.getProtocol();
    final String host = requestUrl.getHost();
    // NOTE(review): URL.getPort() returns -1 when the URL has no explicit port;
    // createSocket(host, -1) would fail. Presumably callers always supply an explicit
    // port here — TODO confirm, or fall back to getDefaultPort().
    final int port = requestUrl.getPort();
    switch (protocol) {
        case "https":
            // try-with-resources closes the socket after the full response is read.
            try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                return doInputOutput(httpRequest, socket);
            }
        case "http":
            try (Socket socket = new Socket(host, port)) {
                return doInputOutput(httpRequest, socket);
            }
        default:
            throw LOGGER.logThrowableAsWarning(
                new ProtocolException("Only HTTP and HTTPS are supported by this client."));
    }
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
    throws IOException {
    // Host is mandatory in HTTP/1.1; Connection: close unless keep-alive was requested,
    // since each PATCH opens a fresh socket.
    httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
    if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
        httpRequest.setHeader(HeaderName.CONNECTION, "close");
    }
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
        OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
        buildAndSend(httpRequest, out);
        DefaultHttpClientResponse response = buildResponse(httpRequest, in);
        Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
        String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
        // NOTE(review): any Location header triggers a follow-up request, regardless of
        // status code, and the recursion via sendPatchRequest has no hop limit — a
        // redirect loop would recurse until stack exhaustion. Consider a max-redirect
        // count and a 3xx status check.
        if (redirectLocation != null) {
            if (redirectLocation.startsWith("http")) {
                httpRequest.setUrl(redirectLocation);
            } else {
                // Relative redirect: resolve against the current request URL.
                httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
            }
            return sendPatchRequest(httpRequest);
        }
        return response;
    }
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
/**
 * Serializes the request (request line, headers, optional body) and writes it to the
 * socket's output stream.
 *
 * @param httpRequest The HTTP Request being sent
 * @param out output stream for writing the request
 * @throws IOException If an I/O error occurs
 */
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
    final StringBuilder request = new StringBuilder();
    request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
    if (httpRequest.getHeaders().getSize() > 0) {
        for (Header header : httpRequest.getHeaders()) {
            header.getValuesList()
                .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
        }
    }
    // Per RFC 7230 the header section is always terminated by an empty line, even when
    // there is no body. The previous implementation only emitted it when a body was
    // present, producing a malformed message for body-less requests.
    request.append("\r\n");
    if (httpRequest.getBody() != null) {
        request.append(httpRequest.getBody().toString()).append("\r\n");
    }
    out.write(request.toString());
    out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
    throws IOException {
    // Status line is expected to look like "HTTP/1.1 200 OK"; the code is located
    // relative to the version's dot. NOTE(review): throws NPE/IndexOutOfBounds on an
    // empty or malformed status line — confirm upstream guarantees.
    String statusLine = reader.readLine();
    int dotIndex = statusLine.indexOf('.');
    int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
    // Headers run until the first empty line; each is split at the first ':'.
    Headers headers = new Headers();
    String line;
    while ((line = reader.readLine()) != null && !line.isEmpty()) {
        int split = line.indexOf(':');
        String key = line.substring(0, split);
        String value = line.substring(split + 1).trim();
        headers.add(HeaderName.fromString(key), value);
    }
    // NOTE(review): body lines are concatenated without their newlines, altering any
    // multi-line payload; Content-Length/chunked framing is also ignored — relies on
    // "Connection: close" ending the stream. Confirm this is acceptable for callers.
    StringBuilder bodyString = new StringBuilder();
    while ((line = reader.readLine()) != null) {
        bodyString.append(line);
    }
    BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
    return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
We should throw here instead of ignoring invalid data. | private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
} | continue; | private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
/**
 * Receive the response from the remote server.
 *
 * <p>Fix: previously every response without an error stream was returned with no body —
 * only SSE responses should skip body buffering. The SSE reader is now also closed, and
 * I/O failures are wrapped in {@link UncheckedIOException} instead of a bare
 * RuntimeException (consistent with the rest of the client, and still a
 * RuntimeException for existing catch blocks).</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Only successful text/event-stream responses with a listener take the SSE path.
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders) && listener != null) {
            try (BufferedReader reader
                = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
                RetrySSEResult retrySSEResult = processBuffer(reader, listener);
                if (retrySSEResult != null) {
                    retryExceptionForSSE(retrySSEResult, listener, httpRequest);
                }
            }
            // SSE responses carry no buffered body; events were delivered to the listener.
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        }
        // Regular (or error) response: drain the stream fully into memory as the body.
        AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
        return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
            BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Copies the connection's response headers into a {@link Headers} instance, skipping
 * the null key HttpURLConnection uses for the status line.
 *
 * <p>Fix: iterate the local snapshot instead of calling
 * {@code connection.getHeaderFields()} a second time.</p>
 *
 * @param connection The connection whose response headers are read.
 * @return The converted headers.
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(hucHeaders.size());
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Determines whether the response declares a {@code text/event-stream} content type.
 *
 * <p>Fix: guard against a missing Content-Type header, which previously caused a
 * NullPointerException on {@code get(...).getValue()}.</p>
 *
 * @param responseHeaders The response headers to inspect.
 * @return true when a Content-Type header is present and equals text/event-stream.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
/**
 * Reads the SSE stream character by character, dispatching each complete event block
 * (terminated by a blank line) to the listener.
 *
 * <p>Fix: the catch block dereferenced {@code event} unconditionally, throwing a
 * NullPointerException when the stream failed before any complete event was parsed.</p>
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry context when the stream broke mid-read, or null on normal completion.
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                // Drop the processed block plus its terminating blank line.
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                // Comment-only blocks (default event name, no data) are not dispatched.
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Guard against a failure occurring before any event was parsed.
        return new RetrySSEResult(e, event != null ? event.getId() : -1,
            event != null ? event.getRetryAfter() : -1);
    }
    return null;
}
// Returns the index of the blank line ("\n\n") terminating an SSE event block, or -1.
private int isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n");
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
/**
 * Retries the request if the listener allows it; otherwise reports the failure via
 * {@code onError}.
 *
 * <p>Fix: an {@link InterruptedException} during the backoff sleep was swallowed;
 * the interrupt status is now restored so callers can observe the interruption.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        // Echo the last delivered event id so the server can resume the stream.
        if (lastEventId != -1) {
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status, do not swallow
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final long retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
/**
 * Opens an {@link HttpURLConnection} for the request URL, applying proxy settings,
 * connect/read timeouts, the HTTP method and all request headers.
 *
 * @param httpRequest The HTTP Request being sent.
 * @return The configured, unconnected {@link HttpURLConnection}.
 * @throws UncheckedIOException If the connection cannot be opened.
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address == null) {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
            Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
            connection = (HttpURLConnection) url.openConnection(proxy);
            if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                // Use an explicit charset; the no-arg getBytes() depends on the platform
                // default and could mis-encode non-ASCII credentials.
                String authStringEnc
                    = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured"; leave the JDK defaults in place in that case.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            // addRequestProperty (not set) preserves multiple values per header name.
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>GET and HEAD requests never carry a body; for every other supported method the body is written to the
 * connection's output stream.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    // Switch on the local captured above instead of calling getHttpMethod() a second time.
    switch (method) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server
 *
 * <p>Successful responses ({@code getErrorStream() == null}) with a {@code text/event-stream} content type are
 * dispatched to the request's {@link ServerSentEventListener} when one is attached, and the returned response
 * carries no body. All other responses are buffered completely into memory.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
            if (listener != null) {
                // The listener consumes the stream as events arrive; nothing is buffered for the response.
                processTextEventStream(httpRequest, connection, listener);
            } else {
                // No listener: skip reading the event stream entirely and return a body-less response.
                LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                    " http request. Treating response as regular response.");
            }
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            // Non-SSE (or error) responses are drained fully into memory before returning.
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        // Always release the connection, even when reading the response fails.
        connection.disconnect();
    }
}
/**
 * Streams a {@code text/event-stream} response body to the given listener, retrying the request if an I/O
 * failure interrupts the stream and the listener permits it.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The connection whose input stream carries the event stream
 * @param listener The listener that receives the dispatched server-sent events
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Prefer the StandardCharsets constant over the charset-name overload: the name-based overload performs a
    // charset lookup on every call and declares UnsupportedEncodingException.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Determines whether the response headers declare a {@code text/event-stream} content type.
 *
 * @param responseHeaders The response headers to inspect
 * @return true when a Content-Type header is present and equals {@code text/event-stream}
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Look the header up once instead of performing the same map lookup twice.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
/**
 * Processes the sse buffer and dispatches the event
 *
 * <p>Reads the stream line-by-line, accumulating lines until a blank line (the SSE block terminator) is seen,
 * then parses the accumulated block and dispatches it to the listener. Events that have the default
 * {@code "message"} type and carry no data are suppressed.</p>
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return a RetrySSEResult describing the failure point when an IOException interrupts the stream, or null when
 * the stream ends normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            collectedData.append(line).append("\n");
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                // Start a fresh buffer for the next event block.
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Capture the last dispatched event's id and retry hint (guarding against no event having been
        // dispatched yet) so a retry can resume from the right place.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
    }
    return null;
}
/**
 * Returns whether the collected data already contains a blank line, which terminates a server-sent-event block.
 *
 * @param sb the stream data gathered so far
 * @return true once a complete event block has been accumulated
 */
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") != -1;
}
/**
 * Retries the request if the listener allows it
 *
 * <p>When the listener declines the retry (or the thread is already interrupted) the exception is surfaced via
 * the listener's onError callback. Otherwise the Last-Event-Id header is populated, any server-provided retry
 * delay is honored, and the request is re-sent.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tell the server where to resume the event stream.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so code up the stack can observe the interruption; silently swallowing
        // it would discard the signal.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Translates the connection's header fields into a Headers collection, skipping the null-keyed entry that the
 * JDK uses for the HTTP status line.
 *
 * @param connection the connection whose response headers are read
 * @return the translated response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers headers = new Headers(rawHeaders.size());
    rawHeaders.forEach((name, values) -> {
        // The status line is reported under a null key; it is not a real header.
        if (name != null) {
            headers.add(HeaderName.fromString(name), values);
        }
    });
    return headers;
}
/**
 * Fully drains the connection's response body into an in-memory buffer, preferring the error stream when the
 * server reported a failure, otherwise the regular input stream.
 *
 * @param connection the connection to drain
 * @return a buffer holding the complete response body
 * @throws IOException if reading the response body fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = inputStream.read(chunk); read != -1; read = inputStream.read(chunk)) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    // Id of the last event dispatched before the failure, or -1 when none was dispatched.
    private final long lastEventId;
    // Server-suggested delay before retrying, or null when the server supplied none.
    private final Duration retryAfter;

    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }

    public IOException getException() {
        return ioException;
    }

    public long getLastEventId() {
        return lastEventId;
    }

    public Duration getRetryAfter() {
        return retryAfter;
    }
}
/**
 * Minimal socket-based HTTP client used for PATCH requests, which HttpURLConnection cannot send.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * <p>Responses carrying a Location header are followed by re-issuing the PATCH against the redirect
     * target (absolute or resolved relative to the current URL).</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve it against the current request URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // The peer closed the connection before sending anything; failing with an IOException here is
            // clearer than the NullPointerException the unguarded substring below would raise.
            throw new IOException("Connection closed before an HTTP status line was received.");
        }
        // The status code is the three digits following "HTTP/1.x ", located relative to the version's dot.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        // NOTE: reading the body line-by-line drops the original line terminators.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode with the same charset the reader used to decode (UTF-8) instead of the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
We could just log and let the user know that there was no listener attached and skip reading the data. As in-memory data without the listener won't reach the user anyway. | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | if (isTextEventStream(responseHeaders) && listener != null) { | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
 * Translates the connection's header fields into a Headers collection, skipping the null-keyed entry that the
 * JDK uses for the HTTP status line.
 *
 * @param connection The HttpURLConnection whose response headers are read
 * @return the translated response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Iterate the map fetched above rather than calling getHeaderFields() a second time.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        // The status line is reported under a null key; it is not a real header.
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Determines whether the response headers declare a {@code text/event-stream} content type.
 *
 * @param responseHeaders The response headers to inspect
 * @return true when a Content-Type header is present and equals {@code text/event-stream}
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Guard against responses without a Content-Type header; the previous unguarded get(...).getValue() call
    // threw a NullPointerException for any response that omitted it.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && ContentType.TEXT_EVENT_STREAM.equals(contentType.getValue());
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
 * Processes the sse buffer and dispatches the event
 *
 * <p>Reads the stream character-by-character, and whenever a blank line (the SSE block terminator) is found,
 * parses the accumulated block and dispatches it to the listener.</p>
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return a RetrySSEResult describing the failure point when an IOException interrupts the stream, or null
 * when the stream ends normally
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                // A blank line terminates an SSE block: parse it, dispatch it, and drop it from the buffer.
                String[] lines = collectedData.substring(0, index).split("\n");
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Guard against a failure occurring before any event was dispatched; dereferencing a null 'event'
        // here would replace the original I/O failure with a NullPointerException.
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : -1);
    }
    return null;
}
/**
 * Locates the blank-line terminator of a server-sent-event block.
 *
 * @param collected the stream data gathered so far
 * @return the index of the first "\n\n" sequence, or -1 when no complete block is present yet
 */
private int isEndOfBlock(StringBuilder collected) {
    return collected.indexOf("\n\n");
}
/**
 * Parses the raw lines of one server-sent-event block into a ServerSentEvent.
 *
 * <p>Each line is split on the first ':' into a field name and value. A leading ':' marks a comment line, and
 * lines without a colon are skipped. Unknown field names are ignored. When no explicit event type is given,
 * the default {@code "message"} type is applied.</p>
 *
 * @param lines the lines of one event block (already split on '\n')
 * @return the parsed event
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder eventData = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx < 0) {
            // No colon: this implementation skips the line entirely.
            continue;
        } else if (idx == 0) {
            // A leading colon marks a comment line.
            event.setComment(line.substring(1).trim());
            continue;
        }
        String field = line.substring(0, idx);
        String value = line.substring(idx + 1).trim();
        // NOTE(review): toLowerCase() uses the default locale; under e.g. the Turkish locale "ID" would not
        // lower-case to "id". Consider toLowerCase(Locale.ROOT) — verify Locale is imported before changing.
        switch (field.trim().toLowerCase()) {
            case "event":
                event.setEvent(value);
                continue;
            case "data":
                // Multiple data lines are joined with newlines.
                if (eventData.length() > 0) {
                    eventData.append("\n");
                }
                eventData.append(value);
                continue;
            case "id":
                if (!value.isEmpty()) {
                    event.setId(Long.parseLong(value));
                }
                continue;
            case "retry":
                // Only purely numeric retry values are accepted.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setRetryAfter(Long.parseLong(value));
                }
                continue;
            default:
                continue;
        }
    }
    if (event.getEvent() == null) {
        event.setEvent(DEFAULT_EVENT);
    }
    if (eventData.length() != 0) {
        event = event.setData(eventData.toString());
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * <p>When the listener declines the retry (or the thread is already interrupted) the exception is surfaced via
 * the listener's onError callback. Otherwise the Last-Event-Id header is populated, any server-provided retry
 * delay is honored, and the request is re-sent.</p>
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            // Tell the server where to resume the event stream.
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status so code up the stack can observe the interruption; silently
            // swallowing it would discard the signal.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    // Id of the last event dispatched before the failure, or -1 when none was dispatched.
    private final long lastEventId;
    // Server-suggested retry delay in milliseconds; values <= 0 mean no delay was requested.
    private final long retryAfter;

    public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }

    public IOException getException() {
        return ioException;
    }

    public long getLastEventId() {
        return lastEventId;
    }

    public long getRetryAfter() {
        return retryAfter;
    }
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
 * Streams a {@code text/event-stream} response body to the given listener, retrying the request if an I/O
 * failure interrupts the stream and the listener permits it.
 *
 * <p>(The previous Javadoc here described a different method — "Receive the response from the remote server"
 * with {@code @return} — and did not match this void method's signature.)</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being read from
 * @param listener The listener receiving the dispatched server-sent events
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Prefer the StandardCharsets constant over the charset-name overload: the name-based overload performs a
    // charset lookup on every call and declares UnsupportedEncodingException.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/** Returns whether the response declares a {@code text/event-stream} content type. */
private boolean isTextEventStream(Headers responseHeaders) {
    // Look the header up once instead of twice.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Reads the SSE stream line by line and dispatches each complete event block to the listener.
 *
 * <p>Lines are accumulated until a blank line (the SSE block terminator) is seen, at which point
 * the collected lines are parsed into a {@link ServerSentEvent} and, if it carries data or a
 * non-default event name, delivered to the listener. On normal end of stream the listener's
 * {@code onClose} is invoked.</p>
 *
 * @param reader The reader over the response's event stream.
 * @param listener The listener object attached with the httpRequest.
 * @return retry information when an IOException interrupted the stream, or null on normal completion.
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
// Only dispatch events that carry data or a non-default event name.
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
// Start collecting the next event block from scratch.
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
// Hand back what a retry needs: the failure, the last seen event id (-1 if none) and the retry hint.
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
/** An SSE event block is terminated by a blank line, i.e. two consecutive newlines. */
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") != -1;
}
/**
 * Parses the lines of one SSE event block into a {@link ServerSentEvent}.
 *
 * <p>Per the SSE format: a line starting with ':' is a comment, a line without a colon is a field
 * name with an empty value, otherwise the text before the first colon is the field name and the
 * trimmed remainder is its value. Unknown field names are rejected.</p>
 *
 * @param lines the individual lines of a single event block
 * @return the populated event; the event name defaults to "message" when none was supplied
 * @throws IllegalArgumentException if a line carries an unrecognized field name
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        if (line.isEmpty()) {
            // Blank lines only delimit event blocks; nothing to parse.
            continue;
        }
        int idx = line.indexOf(':');
        if (idx == 0) {
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // BUG FIX: for colon-less lines the field name ends at line.length(), not lines.length
        // (the number of lines in the block), which truncated the name or threw
        // StringIndexOutOfBoundsException depending on the block size.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Attempts to retry a failed SSE stream when the listener permits it; otherwise reports the
 * failure to the listener.
 *
 * @param retrySSEResult The failure details: exception, last seen event id, and retry-after hint.
 * @param listener The listener attached to the request; consulted via shouldRetry and notified on error.
 * @param httpRequest The HTTP request to re-send when a retry is allowed.
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tell the server where the stream left off so it can resume rather than restart.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of swallowing it, so callers up the
        // stack can still observe the interruption.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Copies the connection's response headers into a {@link Headers} instance.
 * Entries with a null key are skipped — HttpURLConnection reports the HTTP status
 * line under a null key, and it is not a real header.
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers headers = new Headers(rawHeaders.size());
    for (Map.Entry<String, List<String>> rawHeader : rawHeaders.entrySet()) {
        String name = rawHeader.getKey();
        if (name == null) {
            continue;
        }
        headers.add(HeaderName.fromString(name), rawHeader.getValue());
    }
    return headers;
}
/**
 * Drains the connection's error stream — or, when no error stream exists, its input
 * stream — into an in-memory buffer.
 *
 * @param connection the connection whose response bytes are read
 * @return a buffer holding the full response body
 * @throws IOException if reading either stream fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        int read;
        while ((read = source.read(chunk)) != -1) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Inner class to hold the result for a retry of an SSE request.
 *
 * <p>Captures the IOException that interrupted the stream, the id of the last event that was
 * successfully parsed (or -1 if none), and the server-provided retry delay (or null if none).</p>
 */
private static class RetrySSEResult {
// Id of the last event parsed before the failure; -1 when no event was seen.
private final long lastEventId;
// Server-suggested delay before retrying; may be null when the server gave no hint.
private final Duration retryAfter;
// The failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
/**
 * Minimal socket-based HTTP client used only for PATCH requests, which
 * HttpURLConnection does not support.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // BUG FIX: URL.getPort() returns -1 when the URL has no explicit port, and the socket
        // constructors reject -1. Fall back to the protocol's default port (80/443).
        final int port = requestUrl.getPort() == -1 ? requestUrl.getDefaultPort() : requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                // NOTE(review): redirects are followed unconditionally with no hop limit; a
                // redirect loop would recurse until stack exhaustion. Consider a redirect cap.
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // BUG FIX: the header section must always be terminated by a blank line, even for
        // bodiless requests — previously the blank line was only written when a body existed,
        // producing a malformed request otherwise. The stray CRLF previously appended after the
        // body has also been dropped; with no Content-Length the body is delimited by the
        // connection close, so those two bytes were sent as part of the body.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            // NOTE(review): no Content-Length header is set; framing relies entirely on the
            // "Connection: close" applied in doInputOutput — confirm this is intended.
            request.append(httpRequest.getBody().toString());
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        // Locate the status code relative to the '.' in "HTTP/1.1", e.g. "HTTP/1.1 200 OK" -> 200.
        String statusLine = reader.readLine();
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        // Headers run until the first empty line.
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        // Everything after the blank line is the body.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
Do we expect stream-event APIs to have a body? Isn't that what is read into the stream data? | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
} | return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders); | private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
 * Copies the response headers reported by the connection into a {@link Headers} collection.
 * Entries with a null key are skipped (HttpURLConnection reports the status line under a null key).
 *
 * @param connection The HttpURLConnection the response was received on
 * @return the response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    // Pre-size for the expected entry count given the default 0.75 load factor.
    Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
    // Reuse the snapshot taken above instead of asking the connection for its header fields again.
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
/**
 * Returns whether the response declares a {@code text/event-stream} content type.
 * BUG FIX: responses without a Content-Type header previously caused a NullPointerException;
 * a missing header now simply means the response is not an event stream.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
 * Processes the sse buffer and dispatches each complete event block to the listener.
 *
 * <p>Characters are accumulated until a blank line (the SSE block terminator) is seen; the block is
 * then parsed and, if it carries data or a non-default event name, delivered to the listener. On
 * normal end of stream the listener's onClose is invoked.</p>
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information when an IOException interrupted the stream, or null on normal completion
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                // Drop the consumed block plus its two-newline terminator.
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // BUG FIX: 'event' is null when the stream fails before any block was parsed;
        // previously this dereferenced it unconditionally and threw a NullPointerException.
        return new RetrySSEResult(e, event != null ? event.getId() : -1,
            event != null ? event.getRetryAfter() : -1);
    }
    return null;
}
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
}
/**
 * Retries the request if the listener allows it; otherwise surfaces the failure to the listener.
 *
 * @param retrySSEResult the details of the failed attempt (exception, last event id, retry delay in millis)
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    long lastEventId = retrySSEResult.getLastEventId();
    long retryAfter = retrySSEResult.getRetryAfter();
    if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
        if (lastEventId != -1) {
            // Let the server resume the stream from the last event we saw.
            httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
                String.valueOf(lastEventId));
        }
        try {
            if (retryAfter > 0) {
                Thread.sleep(retryAfter);
            }
        } catch (InterruptedException e) {
            // BUG FIX: restore the interrupt status instead of swallowing it so callers
            // up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
            return;
        }
        if (!Thread.currentThread().isInterrupted()) {
            this.send(httpRequest);
        }
    } else {
        listener.onError(retrySSEResult.getException());
    }
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final long retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one SSE event block into a {@link ServerSentEvent}.
 *
 * <p>Per the SSE format: a line starting with ':' is a comment, a line without a colon is a field
 * name with an empty value, otherwise the text before the first colon is the field name and the
 * trimmed remainder is its value. Unknown field names are rejected.</p>
 *
 * @param lines the individual lines of a single event block
 * @return the populated event; the event name defaults to "message" when none was supplied
 * @throws IllegalArgumentException if a line carries an unrecognized field name
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        if (line.isEmpty()) {
            // Blank lines only delimit event blocks; nothing to parse.
            continue;
        }
        int idx = line.indexOf(':');
        if (idx == 0) {
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // BUG FIX: for colon-less lines the field name ends at line.length(), not lines.length
        // (the number of lines in the block), which truncated the name or threw
        // StringIndexOutOfBoundsException depending on the block size.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener,
    HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tell the server where the stream left off so it can resume after that event.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
            String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // BUG FIX: restore the interrupt status so callers up the stack can observe the
        // interruption; previously the flag was silently swallowed.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Copies the {@link HttpURLConnection} response headers into a {@link Headers} collection,
 * skipping the null key that the JDK uses to hold the HTTP status line.
 *
 * @param connection the connection whose response headers are read
 * @return the translated response headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> connectionHeaders = connection.getHeaderFields();
    Headers headers = new Headers(connectionHeaders.size());
    connectionHeaders.forEach((name, values) -> {
        if (name != null) {
            headers.add(HeaderName.fromString(name), values);
        }
    });
    return headers;
}
/**
 * Fully drains the connection's error stream — or, when no error stream exists, its input
 * stream — into an in-memory buffer.
 *
 * @param connection the connection whose response body is read
 * @return a buffer holding the complete response body
 * @throws IOException if reading the stream fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        int read;
        while ((read = source.read(chunk)) != -1) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // Identifier of the last event that was successfully parsed, or -1 when none was.
    private final long lastEventId;
    // Server-suggested delay before reconnecting; null when the server gave no hint.
    private final Duration retryAfter;
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public Duration getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
/**
 * Minimal socket-level HTTP client used for PATCH requests.
 * NOTE(review): presumably this exists because HttpURLConnection does not support the PATCH
 * method — confirm before changing how PATCH is routed.
 */
private static class SocketClient {
    // Appended to the request line when serializing the request.
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // NOTE(review): URL.getPort() returns -1 when the URL has no explicit port; presumably
        // callers always supply one — confirm, otherwise the socket connect fails.
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }
    /**
     * Calls buildAndSend to send a String representation of the request across the connection, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        // Force "Connection: close" (unless keep-alive was explicitly requested) so buildResponse
        // can read the body to end-of-stream.
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            // Follow a redirect by re-sending the PATCH request to the Location target; relative
            // locations are resolved against the current request URL.
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }
    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // The leading CRLF terminates the header section before the body.
        // NOTE(review): when there is no body, the header section is never terminated by a blank
        // line, which is technically malformed HTTP — confirm servers tolerate this.
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }
    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        // The status code sits two characters past the '.' of "HTTP/1.1", e.g. "HTTP/1.1 200 OK".
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        // The header section ends at the first empty line.
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        // Read the body to end-of-stream; this relies on the server closing the connection
        // (the request was sent with "Connection: close").
        // NOTE(review): readLine() strips line terminators, so newlines in the body are lost —
        // confirm that is acceptable for the responses this path handles.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
This was existing code, just moved around, but will update it. | private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
} | Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); | private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
public static final String LAST_EVENT_ID = "Last-Event-Id";
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
RetrySSEResult retrySSEResult;
if (connection.getErrorStream() == null) {
if (isTextEventStream(responseHeaders) && listener != null) {
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
} finally {
connection.disconnect();
}
}
/**
 * Determines whether the response declares a {@code text/event-stream} content type.
 *
 * @param responseHeaders the headers of the received response
 * @return true when a Content-Type header is present and equals text/event-stream
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // BUG FIX: responses without a Content-Type header previously caused a NullPointerException.
    return responseHeaders.get(HeaderName.CONTENT_TYPE) != null
        && responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
 * Processes the sse buffer and dispatches the event
 *
 * @param reader The BufferedReader object
 * @param listener The listener object attached with the httpRequest
 * @return retry information when an IOException interrupts the stream, or null on normal completion
 */
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        int dataRead = reader.read();
        while (dataRead != -1) {
            collectedData.append((char) dataRead);
            dataRead = reader.read();
            // A blank line (two consecutive newlines) terminates one SSE event block.
            int index = isEndOfBlock(collectedData);
            if (index >= 0) {
                String[] lines = collectedData.substring(0, index).split("\n");
                collectedData.delete(0, index + 2);
                event = processLines(lines);
                if (event != null
                    && (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null)) {
                    listener.onEvent(event);
                }
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // BUG FIX: 'event' is null when the failure happens before any complete event was parsed;
        // this previously dereferenced it unconditionally and threw NullPointerException instead
        // of reporting the retry result.
        return new RetrySSEResult(e, event != null ? event.getId() : -1,
            event != null ? event.getRetryAfter() : -1);
    }
    return null;
}
private int isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n");
}
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx < 0) {
continue;
} else if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
}
String field = line.substring(0, idx);
String value = line.substring(idx + 1).trim();
switch (field.trim().toLowerCase()) {
case "event":
event.setEvent(value);
continue;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
continue;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
continue;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Long.parseLong(value));
}
continue;
default:
continue;
}
}
if (event.getEvent() == null) {
event.setEvent(DEFAULT_EVENT);
}
if (eventData.length() != 0) {
event = event.setData(eventData.toString());
}
return event;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
long lastEventId = retrySSEResult.getLastEventId();
long retryAfter = retrySSEResult.getRetryAfter();
if (!Thread.currentThread().isInterrupted() && listener.shouldRetry(retrySSEResult.getException(), retryAfter, lastEventId)) {
if (lastEventId != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
String.valueOf(lastEventId));
}
try {
if (retryAfter > 0) {
Thread.sleep(retryAfter);
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
} else {
listener.onError(retrySSEResult.getException());
}
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final long retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, long retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public long getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the
* connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output
* stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse
* from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (
BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending
* over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ")
.append(httpRequest.getUrl().getPath())
.append(HTTP_VERSION)
.append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList().forEach(value -> request.append(header.getName())
.append(": ")
.append(value)
.append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n")
.append(httpRequest.getBody().toString())
.append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information
* needed to construct an instance of HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
String[] kv = line.split(": ", 2);
String k = kv[0];
String v = kv[1];
headers.add(HeaderName.fromString(k), v);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of a single SSE event block into a {@link ServerSentEvent}.
 *
 * @param lines the individual lines of one event block
 * @return the parsed event, with the default event name applied when none was supplied
 * @throws IllegalArgumentException if a line contains an unrecognized SSE field
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A line starting with a colon is an SSE comment.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // When there is no colon the entire line is the field name and the value is empty.
        // BUG FIX: this previously used 'lines.length' (the array length) as the end index,
        // which could throw StringIndexOutOfBoundsException or truncate the field name.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                // The retry hint must be purely numeric; anything else is ignored.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener,
    HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tell the server where the stream left off so it can resume after that event.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
            String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException ignored) {
        // BUG FIX: restore the interrupt status so callers up the stack can observe the
        // interruption; previously the flag was silently swallowed.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Drains the connection's response body into an in-memory buffer, preferring
 * the error stream when one exists and falling back to the regular input
 * stream otherwise. Both streams are closed before returning.
 *
 * @param connection the connection whose response body should be read
 * @return a buffer containing the full response body bytes
 * @throws IOException if reading from the connection fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream collected = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
         InputStream source = (errorStream != null) ? errorStream : connection.getInputStream()) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            collected.write(chunk, 0, read);
        }
    }
    return collected;
}
/**
 * Immutable holder for the state needed to decide on, and perform, a retry of
 * a failed SSE request: the triggering exception, the id of the last event
 * that was delivered, and the server-requested retry delay.
 */
private static class RetrySSEResult {
    private final IOException cause;
    private final long lastEventId;
    private final Duration retryAfter;

    public RetrySSEResult(IOException cause, long lastEventId, Duration retryAfter) {
        this.cause = cause;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }

    public IOException getException() {
        return cause;
    }

    public long getLastEventId() {
        return lastEventId;
    }

    public Duration getRetryAfter() {
        return retryAfter;
    }
}
/**
 * Minimal socket-based HTTP client used only for PATCH requests, which
 * {@link java.net.HttpURLConnection} does not support.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
             OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            // Follow redirects (absolute or relative Location) by re-issuing the
            // PATCH on a fresh socket. NOTE(review): there is no redirect-depth
            // limit, so a redirect loop would recurse indefinitely — confirm
            // whether a cap should be added.
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
        if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // The header section must always be terminated by an empty line, even when
        // the request has no body. Previously the blank line was only written when a
        // body was present, producing a malformed bodiless request that the server
        // would wait on indefinitely.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            request.append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        // Status line looks like "HTTP/1.1 200 OK"; the three digits after the
        // HTTP version's '.' are the status code. NOTE(review): assumes a
        // well-formed HTTP/1.x status line — confirm server behavior.
        String statusLine = reader.readLine();
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        // NOTE(review): reading the body with readLine() drops the original line
        // terminators and ignores Content-Length/chunked framing — acceptable only
        // if callers never depend on exact body bytes; verify.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode explicitly as UTF-8 instead of the platform default charset.
        BinaryData body
            = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
I think we'd be better off setting `collectedData` to a new StringBuilder instead of using `setLength(0)` | private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData.setLength(0);
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : null);
}
return null;
} | collectedData.setLength(0); | private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
// Compiled once per class, not per instance: Pattern is immutable and thread-safe,
// and constants should be static final (matches UPPER_SNAKE_CASE naming).
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
/**
 * Consumes a text/event-stream response, dispatching parsed events to the
 * listener and retrying the request when the listener permits it.
 *
 * @param httpRequest the request that produced this stream (re-sent on retry)
 * @param connection the open connection whose input stream carries the events
 * @param listener the listener receiving events, errors, and close notifications
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // SSE streams are defined to be UTF-8 encoded; the previous code used the
    // platform-default charset, corrupting multi-byte characters on some JVMs.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
} else if (idx < 0) {
throw new IllegalArgumentException("Invalid data received from server");
}
String field = line.substring(0, idx).trim().toLowerCase();
String value = line.substring(idx + 1).trim();
switch (field) {
case "event":
event.setEvent(value);
break;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
break;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
event.setEvent(event.getEvent() == null ? DEFAULT_EVENT : event.getEvent());
if (eventData.length() != 0) {
event.setData(eventData.toString());
}
return event;
}
/**
 * Retries the request if the listener allows it.
 *
 * @param retrySSEResult the result of the failed attempt (exception, last event id, retry delay)
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // setHeader rather than add: repeated retries must not accumulate
        // duplicate Last-Event-Id values on the same request.
        httpRequest.setHeader(HeaderName.fromString(LAST_EVENT_ID),
            String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption;
        // swallowing it silently loses the cancellation signal.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses one complete SSE block (the lines between two blank lines) into a
 * {@link ServerSentEvent}, applying the "event", "data", "id" and "retry"
 * field rules of the Server-Sent Events specification.
 *
 * @param lines the individual lines of a single event block
 * @return the parsed event; its event name defaults to {@code DEFAULT_EVENT} when absent
 * @throws IllegalArgumentException if an unrecognized field name is received
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A line beginning with ':' is a comment per the SSE specification.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // A line with no ':' is a field name with an empty value. BUG FIX: the
        // substring upper bound must be line.length(); the previous code used
        // lines.length (the number of lines in the block), which truncated the
        // field name or threw StringIndexOutOfBoundsException.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                // "retry" is honored only when the value is entirely digits.
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it.
 *
 * @param retrySSEResult the result of the failed attempt (exception, last event id, retry delay)
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // setHeader rather than add: repeated retries must not accumulate
        // duplicate Last-Event-Id values on the same request.
        httpRequest.setHeader(HeaderName.fromString(LAST_EVENT_ID),
            String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption;
        // swallowing it silently loses the cancellation signal.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
/**
 * Drains the connection's error stream (when present) or its input stream into an
 * in-memory buffer and returns that buffer.
 *
 * @param connection the connection whose body is read
 * @return the buffered response body
 * @throws IOException if reading either stream fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    // When an error stream exists the response body lives there, not on the input stream.
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Immutable holder describing a failed server-sent event stream so the request can be
 * retried: the triggering I/O failure, the id of the last delivered event (-1 if none),
 * and the server-suggested delay before reconnecting (null if the server sent none).
 */
private static class RetrySSEResult {
// Id of the last event successfully received before the failure; -1 when no event was seen.
private final long lastEventId;
// Server-requested delay before retrying; null when no "retry" field was received.
private final Duration retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
/**
 * Minimal raw-socket HTTP client used only for PATCH requests, which are routed here
 * instead of through HttpURLConnection.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and
     * reads the response.
     *
     * @param httpRequest The HTTP Request being sent
     * @return the parsed response
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // URL.getPort() returns -1 when the URL carries no explicit port; fall back to the
        // protocol's default port (80/443) so such URLs still connect.
        final int port = (requestUrl.getPort() == -1) ? requestUrl.getDefaultPort() : requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Writes the request across the output stream, then parses the response from the input
     * stream. A response carrying a Location header is treated as a redirect and the PATCH
     * is re-issued against the new URL.
     *
     * <p>NOTE(review): redirects are followed without a hop limit, so a redirect loop would
     * recurse indefinitely; consider capping the depth.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket the connected socket
     * @return the parsed response
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        // Use an explicit charset in both directions; the writer previously used the
        // platform default charset, making the request bytes environment-dependent.
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect target: resolve it against the current URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the
     * output stream.
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // Always terminate the header section with a blank line. Previously the blank line
        // was only written when a body was present, yielding a malformed request otherwise.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            // NOTE(review): no Content-Length header is written; framing relies on
            // "Connection: close". Confirm target servers accept this.
            request.append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the status code, headers and
     * body.
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return the parsed response
     * @throws IOException If an I/O error occurs or the connection closes before a status line arrives
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Previously a closed connection fell through to a NullPointerException.
            throw new IOException("Connection closed before an HTTP status line was received.");
        }
        // "HTTP/1.1 200 OK" - the 3-digit status code sits two characters past the '.'.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode with the same charset the stream was decoded with, not the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
If we're going to split the collected data on new lines, and we're writing the collected data with a trailing new line anyway, why don't we just store the lines themselves, e.g. `List<String> collectedDataLines`? | private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
// Accumulate raw stream lines until a blank line ("\n\n") marks the end of an event block.
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
// A complete block is available: parse it into an event.
event = processLines(collectedData.toString().split("\n"));
// Skip dispatch only for empty default events (default name and no data).
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData.setLength(0);
}
}
listener.onClose();
} catch (IOException e) {
// Surface the last event id and retry hint so the caller can retry the stream.
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : null);
}
return null;
} | event = processLines(collectedData.toString().split("\n")); | private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
// Accumulate raw stream lines until a blank line ("\n\n") marks the end of an event block.
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
// A complete block is available: parse it into an event.
event = processLines(collectedData.toString().split("\n"));
// Skip dispatch only for empty default events (default name and no data).
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
// Surface the last event id and retry hint so the caller can retry the stream.
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
} | class DefaultHttpClient implements HttpClient {
// Shared logger for this client implementation.
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Connect/read timeouts in milliseconds; -1 means "not configured" (JDK default applies).
private final long connectionTimeout;
private final long readTimeout;
// Optional proxy configuration; null when connecting directly.
private final ProxyOptions proxyOptions;
// Header used to resume a server-sent event stream after the last delivered event.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Event name used when a server-sent event block does not name its event.
private static final String DEFAULT_EVENT = "message";
// Compile once per class rather than per instance: Pattern is immutable and thread-safe,
// and the constant-style name already implies a static constant.
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a client with the given timeouts and optional proxy.
 * Null timeouts are stored as -1, which later means "do not set an explicit timeout".
 *
 * @param connectionTimeout maximum time to establish a connection, or null for the default
 * @param readTimeout maximum time to wait on a read, or null for the default
 * @param proxyOptions proxy configuration, or null to connect directly
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // PATCH is issued over a raw socket (see sendPatchViaSocket); every other method goes
    // through HttpURLConnection.
    if (httpRequest.getHttpMethod() != HttpMethod.PATCH) {
        HttpURLConnection connection = connect(httpRequest);
        sendBody(httpRequest, connection);
        return receiveResponse(httpRequest, connection);
    }
    return sendPatchViaSocket(httpRequest);
}
/**
 * Synchronously sends a PATCH request via a raw socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException if the socket I/O fails
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
// Wrap as unchecked so send(...) keeps its exception-free signature.
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
 * Opens an HttpURLConnection for the request URL, applying proxy settings, timeouts,
 * the request method and all request headers.
 *
 * <p>If a proxy with credentials is configured, Basic proxy authorization is attached.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The configured HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode with an explicit charset; getBytes() previously used the
                    // platform default, which can mangle non-ASCII credentials.
                    String authStringEnc
                        = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                // NOTE(review): ConnectException is an IOException, so this throw is caught
                // by the enclosing catch below and rewrapped as UncheckedIOException.
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 is the "not configured" sentinel assigned in the constructor.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Writes the request body, if any, to the connection's output stream.
 * GET and HEAD requests never carry a body and are skipped.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    // Switch on the already-fetched local instead of calling getHttpMethod() a second time.
    switch (method) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Reads the status code, headers and body from the connection and wraps them in a response.
 * text/event-stream responses are dispatched to the request's listener instead of being
 * buffered, unless no listener is attached.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
// Only treat the response as an event stream when it is not an error response.
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
// Event-stream responses are returned without a buffered body.
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
// Always release the connection, even when reading the response fails.
connection.disconnect();
}
}
/**
 * Consumes a text/event-stream response body, dispatching parsed events to the listener
 * and retrying the stream when the listener permits it.
 *
 * @param httpRequest the request that produced the stream
 * @param connection the open connection whose input stream carries the events
 * @param listener the server-sent event listener attached to the request
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Server-sent event streams are UTF-8 encoded; previously the reader used the
    // platform default charset, which corrupts non-ASCII event data on some platforms.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Returns true when the response declares a Content-Type of exactly text/event-stream.
 * NOTE(review): this is an exact match — a value such as
 * "text/event-stream; charset=utf-8" would not be recognized; confirm servers never send
 * parameters here.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentTypeHeader = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentTypeHeader != null
        && contentTypeHeader.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Returns true once the accumulated stream text contains a blank line ("\n\n"), which
 * terminates a server-sent event block.
 *
 * @param sb buffer of lines collected from the event stream so far
 * @return true if at least one complete event block has been collected
 */
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one complete server-sent event block into a ServerSentEvent.
 * Lines have the form "field: value"; a line starting with ':' is a comment; a line with
 * no colon is treated as a field name with an empty value, per the SSE stream format.
 *
 * @param lines the lines of a single event block
 * @return the parsed event; its event name defaults to "message" when absent
 * @throws IllegalArgumentException if a field name is not one of event/data/id/retry
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder eventData = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A line beginning with ':' is a comment.
            event.setComment(line.substring(1).trim());
            continue;
        }
        // Per the SSE format, a colon-less line is a field name with an empty value;
        // previously such lines were rejected with an IllegalArgumentException.
        String field = (idx < 0 ? line : line.substring(0, idx)).trim().toLowerCase();
        String value = (idx < 0) ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                event.setEvent(value);
                break;
            case "data":
                // Multiple data lines are joined with newlines.
                if (eventData.length() > 0) {
                    eventData.append("\n");
                }
                eventData.append(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    event.setId(Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setRetryAfter(Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        event.setEvent(DEFAULT_EVENT);
    }
    if (eventData.length() != 0) {
        event.setData(eventData.toString());
    }
    return event;
}
/**
 * Retries a failed server-sent event stream if the listener allows it.
 *
 * @param retrySSEResult state captured from the failed stream: the I/O exception, the last delivered event id, and the server's retry delay
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    // Ask the server to resume the stream after the last event we delivered (-1 means none seen).
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
            String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Previously the interrupt was swallowed; restore the flag so callers further up
        // the stack can still observe the cancellation request.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Copies all named response headers from the connection into a {@code Headers} instance.
 *
 * @param connection the connection whose response headers are read
 * @return the collected headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(rawHeaders.size());
    // The HTTP status line is reported under a null key; copy only real header names.
    rawHeaders.forEach((name, values) -> {
        if (name != null) {
            responseHeaders.add(HeaderName.fromString(name), values);
        }
    });
    return responseHeaders;
}
/**
 * Drains the connection's error stream (when present) or its input stream into an
 * in-memory buffer and returns that buffer.
 *
 * @param connection the connection whose body is read
 * @return the buffered response body
 * @throws IOException if reading either stream fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    // When an error stream exists the response body lives there, not on the input stream.
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Immutable holder describing a failed server-sent event stream so the request can be
 * retried: the triggering I/O failure, the id of the last delivered event (-1 if none),
 * and the server-suggested delay before reconnecting (null if the server sent none).
 */
private static class RetrySSEResult {
// Id of the last event successfully received before the failure; -1 when no event was seen.
private final long lastEventId;
// Server-requested delay before retrying; null when no "retry" field was received.
private final Duration retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
/**
 * Minimal raw-socket HTTP client used only for PATCH requests, which are routed here
 * instead of through HttpURLConnection.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and
     * reads the response.
     *
     * @param httpRequest The HTTP Request being sent
     * @return the parsed response
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        // URL.getPort() returns -1 when the URL carries no explicit port; fall back to the
        // protocol's default port (80/443) so such URLs still connect.
        final int port = (requestUrl.getPort() == -1) ? requestUrl.getDefaultPort() : requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Writes the request across the output stream, then parses the response from the input
     * stream. A response carrying a Location header is treated as a redirect and the PATCH
     * is re-issued against the new URL.
     *
     * <p>NOTE(review): redirects are followed without a hop limit, so a redirect loop would
     * recurse indefinitely; consider capping the depth.</p>
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket the connected socket
     * @return the parsed response
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        // Use an explicit charset in both directions; the writer previously used the
        // platform default charset, making the request bytes environment-dependent.
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect target: resolve it against the current URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the
     * output stream.
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // Always terminate the header section with a blank line. Previously the blank line
        // was only written when a body was present, yielding a malformed request otherwise.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            // NOTE(review): no Content-Length header is written; framing relies on
            // "Connection: close". Confirm target servers accept this.
            request.append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the status code, headers and
     * body.
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return the parsed response
     * @throws IOException If an I/O error occurs or the connection closes before a status line arrives
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Previously a closed connection fell through to a NullPointerException.
            throw new IOException("Connection closed before an HTTP status line was received.");
        }
        // "HTTP/1.1 200 OK" - the 3-digit status code sits two characters past the '.'.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Encode with the same charset the stream was decoded with, not the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
// Shared logger for this client implementation.
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Connect/read timeouts in milliseconds; -1 means "not configured" (JDK default applies).
private final long connectionTimeout;
private final long readTimeout;
// Optional proxy configuration; null when connecting directly.
private final ProxyOptions proxyOptions;
// Header used to resume a server-sent event stream after the last delivered event.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Event name used when a server-sent event block does not name its event.
private static final String DEFAULT_EVENT = "message";
// Matches digit-only strings; compiled once since Pattern compilation is costly.
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a client with the given timeouts and optional proxy.
 * Null timeouts are stored as -1, which later means "do not set an explicit timeout".
 *
 * @param connectionTimeout maximum time to establish a connection, or null for the default
 * @param readTimeout maximum time to wait on a read, or null for the default
 * @param proxyOptions proxy configuration, or null to connect directly
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // PATCH is issued over a raw socket (see sendPatchViaSocket); every other method goes
    // through HttpURLConnection.
    if (httpRequest.getHttpMethod() != HttpMethod.PATCH) {
        HttpURLConnection connection = connect(httpRequest);
        sendBody(httpRequest, connection);
        return receiveResponse(httpRequest, connection);
    }
    return sendPatchViaSocket(httpRequest);
}
/**
 * Synchronously sends a PATCH request via a raw socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 * @throws UncheckedIOException if the socket I/O fails
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
// Wrap as unchecked so send(...) keeps its exception-free signature.
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
 * Opens an HttpURLConnection for the request URL, applying proxy settings, timeouts,
 * the request method and all request headers.
 *
 * <p>If a proxy with credentials is configured, Basic proxy authorization is attached.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The configured HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Encode with an explicit charset; getBytes() previously used the
                    // platform default, which can mangle non-ASCII credentials.
                    String authStringEnc
                        = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                // NOTE(review): ConnectException is an IOException, so this throw is caught
                // by the enclosing catch below and rewrapped as UncheckedIOException.
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 is the "not configured" sentinel assigned in the constructor.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Writes the request body, if any, to the connection's output stream.
 * GET and HEAD requests never carry a body and are skipped.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    // Switch on the already-fetched local instead of calling getHttpMethod() a second time.
    switch (method) {
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Reads the status code, headers and body from the connection and wraps them in a response.
 * text/event-stream responses are dispatched to the request's listener instead of being
 * buffered, unless no listener is attached.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
// Only treat the response as an event stream when it is not an error response.
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
// Event-stream responses are returned without a buffered body.
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
// Always release the connection, even when reading the response fails.
connection.disconnect();
}
}
/**
 * Consumes a text/event-stream response body, dispatching parsed events to the listener
 * and retrying the stream when the listener permits it.
 *
 * @param httpRequest the request that produced the stream
 * @param connection the open connection whose input stream carries the events
 * @param listener the server-sent event listener attached to the request
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Use the charset constant instead of the "UTF-8" string: it avoids the checked
    // UnsupportedEncodingException path and the per-call charset name lookup.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Returns true when the response declares a Content-Type of exactly text/event-stream.
 * NOTE(review): this is an exact match — a value such as
 * "text/event-stream; charset=utf-8" would not be recognized; confirm servers never send
 * parameters here.
 */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentTypeHeader = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentTypeHeader != null
        && contentTypeHeader.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Returns true once the accumulated stream text contains a blank line ("\n\n"), which
 * terminates a server-sent event block.
 *
 * @param sb buffer of lines collected from the event stream so far
 * @return true if at least one complete event block has been collected
 */
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one complete server-sent event block into a ServerSentEvent.
 * Lines have the form "field: value"; a line starting with ':' is a comment; a line with
 * no colon is treated as a field name with an empty value, per the SSE stream format.
 *
 * @param lines the lines of a single event block
 * @return the parsed event; its event name defaults to "message" when absent
 * @throws IllegalArgumentException if a field name is not one of event/data/id/retry
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A line beginning with ':' is a comment.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // For a colon-less line the whole line is the field name. The previous bound of
        // lines.length (the number of lines in the block) truncated the field name or
        // threw StringIndexOutOfBoundsException; line.length() is the correct bound.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase();
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                // Data lines are collected in order; null until the first one arrives.
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries a failed server-sent event stream if the listener allows it.
 *
 * @param retrySSEResult state captured from the failed stream: the I/O exception, the last delivered event id, and the server's retry delay
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    // Ask the server to resume the stream after the last event we delivered (-1 means none seen).
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID),
            String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Previously the interrupt was swallowed; restore the flag so callers further up
        // the stack can still observe the cancellation request.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/**
 * Copies all named response headers from the connection into a {@code Headers} instance.
 *
 * @param connection the connection whose response headers are read
 * @return the collected headers
 */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(rawHeaders.size());
    // The HTTP status line is reported under a null key; copy only real header names.
    rawHeaders.forEach((name, values) -> {
        if (name != null) {
            responseHeaders.add(HeaderName.fromString(name), values);
        }
    });
    return responseHeaders;
}
/**
 * Drains the connection's error stream (when present) or its input stream into an
 * in-memory buffer and returns that buffer.
 *
 * @param connection the connection whose body is read
 * @return the buffered response body
 * @throws IOException if reading either stream fails
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream bodyBuffer = new AccessibleByteArrayOutputStream();
    // When an error stream exists the response body lives there, not on the input stream.
    try (InputStream errorStream = connection.getErrorStream();
        InputStream source = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] chunk = new byte[8192];
        for (int read = source.read(chunk); read != -1; read = source.read(chunk)) {
            bodyBuffer.write(chunk, 0, read);
        }
    }
    return bodyBuffer;
}
/**
 * Immutable holder describing a failed server-sent event stream so the request can be
 * retried: the triggering I/O failure, the id of the last delivered event (-1 if none),
 * and the server-suggested delay before reconnecting (null if the server sent none).
 */
private static class RetrySSEResult {
// Id of the last event successfully received before the failure; -1 when no event was seen.
private final long lastEventId;
// Server-requested delay before retrying; null when no "retry" field was received.
private final Duration retryAfter;
// The I/O failure that interrupted the event stream.
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
Based on the specification, a line missing a `:` is valid https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation | private ServerSentEvent processLines(String[] lines) {
StringBuilder eventData = new StringBuilder();
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
event.setComment(line.substring(1).trim());
continue;
} else if (idx < 0) {
throw new IllegalArgumentException("Invalid data received from server");
}
String field = line.substring(0, idx).trim().toLowerCase();
String value = line.substring(idx + 1).trim();
switch (field) {
case "event":
event.setEvent(value);
break;
case "data":
if (eventData.length() > 0) {
eventData.append("\n");
}
eventData.append(value);
break;
case "id":
if (!value.isEmpty()) {
event.setId(Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
event.setRetryAfter(Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
event.setEvent(event.getEvent() == null ? DEFAULT_EVENT : event.getEvent());
if (eventData.length() != 0) {
event.setData(eventData.toString());
}
return event;
} | throw new IllegalArgumentException("Invalid data received from server"); | private ServerSentEvent processLines(String[] lines) {
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
int idx = line.indexOf(':');
if (idx == 0) {
ServerSentEventHelper.setComment(event, line.substring(1).trim());
continue;
}
String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
String value = idx < 0 ? "" : line.substring(idx + 1).trim();
switch (field) {
case "event":
ServerSentEventHelper.setEvent(event, value);
break;
case "data":
if(eventData == null) {
eventData = new ArrayList<>();
}
eventData.add(value);
break;
case "id":
if (!value.isEmpty()) {
ServerSentEventHelper.setId(event, Long.parseLong(value));
}
break;
case "retry":
if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
}
break;
default:
throw new IllegalArgumentException("Invalid data received from server");
}
}
if (event.getEvent() == null) {
ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
ServerSentEventHelper.setData(event, eventData);
}
return event;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData.setLength(0);
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
try {
HttpURLConnection connection;
URL url = httpRequest.getUrl();
if (proxyOptions != null) {
InetSocketAddress address = proxyOptions.getAddress();
if (address != null) {
Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
connection = (HttpURLConnection) url.openConnection(proxy);
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes());
connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
}
} else {
throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
}
} else {
connection = (HttpURLConnection) url.openConnection();
}
if (connectionTimeout != -1) {
connection.setConnectTimeout((int) connectionTimeout);
}
if (readTimeout != -1) {
connection.setReadTimeout((int) readTimeout);
}
try {
connection.setRequestMethod(httpRequest.getHttpMethod().toString());
} catch (ProtocolException e) {
throw LOGGER.logThrowableAsError(new RuntimeException(e));
}
for (Header header : httpRequest.getHeaders()) {
for (String value : header.getValues()) {
connection.addRequestProperty(header.getName(), value);
}
}
return connection;
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
RetrySSEResult retrySSEResult;
try (BufferedReader reader
= new BufferedReader(new InputStreamReader(connection.getInputStream(), "UTF-8"))) {
retrySSEResult = processBuffer(reader, listener);
if (retrySSEResult != null) {
retryExceptionForSSE(retrySSEResult, listener, httpRequest);
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
listener.onError(retrySSEResult.getException());
return;
}
if (retrySSEResult.getLastEventId() != -1) {
httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
}
try {
if (retrySSEResult.getRetryAfter() != null) {
Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
}
} catch (InterruptedException ignored) {
return;
}
if (!Thread.currentThread().isInterrupted()) {
this.send(httpRequest);
}
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
/**
 * Minimal socket-based HTTP client used only for PATCH requests, which
 * {@code HttpURLConnection} cannot issue.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }
    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        // Explicit UTF-8 on both directions: without a charset the platform default is used,
        // which makes the bytes on the wire machine-dependent.
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                // NOTE(review): redirects are followed unconditionally; a redirect cycle would
                // recurse until stack overflow — consider adding a hop limit.
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }
    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // The header section must always end with a blank line (RFC 7230); previously the
        // terminator was only written when a body was present, producing a malformed request
        // for body-less PATCHes.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            request.append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }
    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Previously this fell through to a NullPointerException.
            throw new IOException("Connection closed before a status line was received.");
        }
        // Pulls the three status digits that follow "HTTP/x.y " — assumes a one-dot version token.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Explicit UTF-8: getBytes() with no charset encodes with the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} |
``` `List<String> lines = new ArrayList<>(); ServerSentEvent event = null; try { String line; while ((line = reader.readLine()) != null) { lines.add(line); if (isEndOfBlock(String.join("\n", lines) + "\n")) { event = processLines(lines.toArray(new String[0]));` ``` It would be better to still check on the `\n` appending at the end of the line and `\n\n` to keep it consistent with the format specification of SSE. | private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData.setLength(0);
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : null);
}
return null;
} | event = processLines(collectedData.toString().split("\n")); | private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
// Accumulate lines with a trailing "\n"; a blank line therefore shows up as "\n\n",
// which marks the end of one SSE event block.
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
// Skip empty keep-alive blocks: dispatch only named events or events carrying data.
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
// Start a fresh buffer for the next event block.
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
// Surface enough state (last event id, suggested retry delay) for the retry machinery.
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Timeouts in milliseconds; -1 means "not configured" and leaves the JDK default in place.
private final long connectionTimeout;
private final long readTimeout;
// Proxy configuration; null means a direct connection.
private final ProxyOptions proxyOptions;
// Header used to resume an SSE stream from the last seen event on retry.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Default SSE event name per the Server-Sent Events specification.
private static final String DEFAULT_EVENT = "message";
// static final: Pattern is immutable and thread-safe, so compiling it once per class avoids
// recompiling the regex for every client instance (it was previously an instance field).
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a DefaultHttpClient.
 *
 * @param connectionTimeout connect timeout, or null to keep the JDK default
 * @param readTimeout read timeout, or null to keep the JDK default
 * @param proxyOptions proxy configuration, or null for a direct connection
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // HttpURLConnection cannot issue PATCH, so that method is tunneled over a raw socket.
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    HttpURLConnection urlConnection = connect(httpRequest);
    sendBody(httpRequest, urlConnection);
    return receiveResponse(httpRequest, urlConnection);
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Surface the checked IOException unchecked, consistent with the other send paths.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Explicit UTF-8: getBytes() with no charset uses the platform default,
                    // which would make the encoded credentials machine-dependent.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData requestBody = httpRequest.getBody();
    if (requestBody == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        // GET and HEAD requests never carry a body.
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream bodyStream = new DataOutputStream(connection.getOutputStream())) {
                requestBody.writeTo(bodyStream);
                bodyStream.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Successful text/event-stream responses are consumed by the SSE machinery; the
        // returned response then has no buffered body.
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
            if (listener != null) {
                processTextEventStream(httpRequest, connection, listener);
            } else {
                LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                    " http request. Treating response as regular response.");
            }
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            // Non-SSE (or error) responses are drained fully into memory.
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        // Always release the connection, even when reading the response fails.
        connection.disconnect();
    }
}
/**
 * Consumes a text/event-stream response, forwarding parsed events to the listener and
 * invoking the retry machinery if the stream fails mid-read.
 *
 * @param httpRequest the request that produced the stream (reused on retry)
 * @param connection the open connection delivering the stream
 * @param listener the listener to notify
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // SSE streams are UTF-8; the charset must be explicit so decoding does not depend on
    // the platform default (the no-charset InputStreamReader constructor does).
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Returns true when the response declares a {@code text/event-stream} content type.
 *
 * @param responseHeaders the response headers to inspect
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Fetch once; responses without a Content-Type header are never event streams.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Determines whether the collected buffer already contains a complete SSE event block.
 *
 * <p>Each stream line is appended with a trailing "\n", so the blank line that terminates
 * an event shows up as "\n\n" in the buffer. (The previous javadoc here described
 * processBuffer's parameters, not this method.)</p>
 *
 * @param sb the buffer of stream lines collected so far
 */
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one SSE event block into a {@code ServerSentEvent}.
 *
 * @param lines a single event block, already split on "\n"
 * @return the parsed event; the event name defaults to "message" when none was sent
 * @throws IllegalArgumentException if a line is neither a comment nor a known SSE field
 */
private ServerSentEvent processLines(String[] lines) {
    StringBuilder eventData = new StringBuilder();
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A leading ':' marks an SSE comment line.
            event.setComment(line.substring(1).trim());
            continue;
        } else if (idx < 0) {
            throw new IllegalArgumentException("Invalid data received from server");
        }
        // Locale.ROOT keeps field matching locale-independent — with the default locale,
        // e.g. Turkish, "ID".toLowerCase() would not equal "id".
        String field = line.substring(0, idx).trim().toLowerCase(java.util.Locale.ROOT);
        String value = line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                event.setEvent(value);
                break;
            case "data":
                // Multiple data lines are joined with "\n" per the SSE specification.
                if (eventData.length() > 0) {
                    eventData.append("\n");
                }
                eventData.append(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    event.setId(Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    event.setRetryAfter(Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    event.setEvent(event.getEvent() == null ? DEFAULT_EVENT : event.getEvent());
    if (eventData.length() != 0) {
        event.setData(eventData.toString());
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tells the server where the stream left off, per the SSE reconnect protocol.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag (it was previously swallowed) so callers can observe it.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
// Translates the JDK header map into the client's Headers type; the null key (which holds
// the HTTP status line) is skipped.
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(hucHeaders.size());
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
// Drains the response into an in-memory buffer, preferring the error stream (4xx/5xx)
// when present.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] buffer = new byte[8192];
        int length;
        while ((length = inputStream.read(buffer)) != -1) {
            outputStream.write(buffer, 0, length);
        }
    }
    return outputStream;
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // ID of the last event parsed before the failure; -1 when no event carried an id.
    private final long lastEventId;
    // Server-suggested reconnect delay; null when the stream never sent a "retry:" field.
    private final Duration retryAfter;
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public Duration getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
/**
 * Minimal socket-based HTTP client used only for PATCH requests, which
 * {@code HttpURLConnection} cannot issue.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }
    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        // Explicit UTF-8 on both directions: without a charset the platform default is used,
        // which makes the bytes on the wire machine-dependent.
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                // NOTE(review): redirects are followed unconditionally; a redirect cycle would
                // recurse until stack overflow — consider adding a hop limit.
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }
    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        // The header section must always end with a blank line (RFC 7230); previously the
        // terminator was only written when a body was present, producing a malformed request
        // for body-less PATCHes.
        request.append("\r\n");
        if (httpRequest.getBody() != null) {
            request.append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }
    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        String statusLine = reader.readLine();
        if (statusLine == null) {
            // Previously this fell through to a NullPointerException.
            throw new IOException("Connection closed before a status line was received.");
        }
        // Pulls the three status digits that follow "HTTP/x.y " — assumes a one-dot version token.
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        // Explicit UTF-8: getBytes() with no charset encodes with the platform default.
        BinaryData body = BinaryData.fromByteBuffer(
            ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Timeouts in milliseconds; -1 means "not configured" and leaves the JDK default in place.
private final long connectionTimeout;
private final long readTimeout;
// Proxy configuration; null means a direct connection.
private final ProxyOptions proxyOptions;
// Header used to resume an SSE stream from the last seen event on retry.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Default SSE event name per the Server-Sent Events specification.
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a DefaultHttpClient.
 *
 * @param connectionTimeout connect timeout, or null to keep the JDK default
 * @param readTimeout read timeout, or null to keep the JDK default
 * @param proxyOptions proxy configuration, or null for a direct connection
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
    this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
    this.proxyOptions = proxyOptions;
}
/**
 * Synchronously send the HttpRequest.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // HttpURLConnection cannot issue PATCH, so that method is tunneled over a raw socket.
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    HttpURLConnection urlConnection = connect(httpRequest);
    sendBody(httpRequest, urlConnection);
    return receiveResponse(httpRequest, urlConnection);
}
/**
 * Synchronously sends a PATCH request via a socket client.
 *
 * @param httpRequest The HTTP request being sent
 * @return The HttpResponse object
 */
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Surface the checked IOException unchecked, consistent with the other send paths.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
 * Open a connection based on the HttpRequest URL
 *
 * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
 * specified in the 'Authorization' header.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @return The HttpURLConnection object
 */
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    // Explicit UTF-8: getBytes() with no charset uses the platform default,
                    // which would make the encoded credentials machine-dependent.
                    String authStringEnc = Base64.getEncoder()
                        .encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData requestBody = httpRequest.getBody();
    if (requestBody == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        // GET and HEAD requests never carry a body.
        case GET:
        case HEAD:
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream bodyStream = new DataOutputStream(connection.getOutputStream())) {
                requestBody.writeTo(bodyStream);
                bodyStream.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
            }
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
/**
 * Receive the response from the remote server
 *
 * @param httpRequest The HTTP Request being sent
 * @param connection The HttpURLConnection being sent to
 * @return A HttpResponse object
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Successful text/event-stream responses are consumed by the SSE machinery; the
        // returned response then has no buffered body.
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
            if (listener != null) {
                processTextEventStream(httpRequest, connection, listener);
            } else {
                LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                    " http request. Treating response as regular response.");
            }
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            // Non-SSE (or error) responses are drained fully into memory.
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        // Always release the connection, even when reading the response fails.
        connection.disconnect();
    }
}
/**
 * Consumes a text/event-stream response, forwarding parsed events to the listener and
 * invoking the retry machinery if the stream fails mid-read.
 *
 * @param httpRequest the request that produced the stream (reused on retry)
 * @param connection the open connection delivering the stream
 * @param listener the listener to notify
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // StandardCharsets.UTF_8 instead of the "UTF-8" name lookup: no charset-name resolution
    // at runtime and no checked UnsupportedEncodingException path.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
 * Returns true when the response declares a {@code text/event-stream} content type.
 *
 * @param responseHeaders the response headers to inspect
 */
private boolean isTextEventStream(Headers responseHeaders) {
    // Fetch once; responses without a Content-Type header are never event streams.
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
 * Determines whether the collected buffer already contains a complete SSE event block.
 *
 * <p>Each stream line is appended with a trailing "\n", so the blank line that terminates
 * an event shows up as "\n\n" in the buffer. (The previous javadoc here described
 * processBuffer's parameters, not this method.)</p>
 *
 * @param sb the buffer of stream lines collected so far
 */
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") >= 0;
}
/**
 * Parses the lines of one SSE event block into a {@code ServerSentEvent}.
 *
 * @param lines a single event block, already split on "\n"
 * @return the parsed event; the event name defaults to "message" when none was sent
 * @throws IllegalArgumentException if a line uses a field name other than the SSE-defined ones
 */
private ServerSentEvent processLines(String[] lines) {
    List<String> eventData = null;
    ServerSentEvent event = new ServerSentEvent();
    for (String line : lines) {
        int idx = line.indexOf(':');
        if (idx == 0) {
            // A leading ':' marks an SSE comment line.
            ServerSentEventHelper.setComment(event, line.substring(1).trim());
            continue;
        }
        // BUG FIX: for a line without ':' the field name is the whole line, so the slice bound
        // must be line.length(); the previous bound, lines.length (the number of LINES), either
        // truncated the field name or threw StringIndexOutOfBoundsException.
        // Locale.ROOT keeps the lowercasing locale-independent.
        String field = line.substring(0, idx < 0 ? line.length() : idx).trim().toLowerCase(java.util.Locale.ROOT);
        String value = idx < 0 ? "" : line.substring(idx + 1).trim();
        switch (field) {
            case "event":
                ServerSentEventHelper.setEvent(event, value);
                break;
            case "data":
                if (eventData == null) {
                    eventData = new ArrayList<>();
                }
                eventData.add(value);
                break;
            case "id":
                if (!value.isEmpty()) {
                    ServerSentEventHelper.setId(event, Long.parseLong(value));
                }
                break;
            case "retry":
                if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                    ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid data received from server");
        }
    }
    if (event.getEvent() == null) {
        ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
    }
    if (eventData != null) {
        ServerSentEventHelper.setData(event, eventData);
    }
    return event;
}
/**
 * Retries the request if the listener allows it
 *
 * @param retrySSEResult the result of the retry
 * @param listener The listener object attached with the httpRequest
 * @param httpRequest the HTTP Request being sent
 */
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    if (Thread.currentThread().isInterrupted() || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(), retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    if (retrySSEResult.getLastEventId() != -1) {
        // Tells the server where the stream left off, per the SSE reconnect protocol.
        httpRequest.getHeaders().add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag (it was previously swallowed) so callers can observe it.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
// Translates the JDK header map into the client's Headers type; the null key (which holds
// the HTTP status line) is skipped.
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> hucHeaders = connection.getHeaderFields();
    Headers responseHeaders = new Headers(hucHeaders.size());
    for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
        if (entry.getKey() != null) {
            responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
        }
    }
    return responseHeaders;
}
// Drains the response into an in-memory buffer, preferring the error stream (4xx/5xx)
// when present.
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
    try (InputStream errorStream = connection.getErrorStream();
        InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] buffer = new byte[8192];
        int length;
        while ((length = inputStream.read(buffer)) != -1) {
            outputStream.write(buffer, 0, length);
        }
    }
    return outputStream;
}
/**
 * Inner class to hold the result for a retry of an SSE request
 */
private static class RetrySSEResult {
    // ID of the last event parsed before the failure; -1 when no event carried an id.
    private final long lastEventId;
    // Server-suggested reconnect delay; null when the stream never sent a "retry:" field.
    private final Duration retryAfter;
    // The I/O failure that interrupted the event stream.
    private final IOException ioException;
    public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
        this.ioException = e;
        this.lastEventId = lastEventId;
        this.retryAfter = retryAfter;
    }
    public long getLastEventId() {
        return lastEventId;
    }
    public Duration getRetryAfter() {
        return retryAfter;
    }
    public IOException getException() {
        return ioException;
    }
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
 * Opens a socket connection, then writes the PATCH request across the connection and reads the response
 *
 * @param httpRequest The HTTP Request being sent
 * @return an instance of HttpUrlConnectionResponse
 * @throws ProtocolException If the protocol is not HTTP or HTTPS
 * @throws IOException If an I/O error occurs
 */
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
    final URL requestUrl = httpRequest.getUrl();
    final String protocol = requestUrl.getProtocol();
    final String host = requestUrl.getHost();
    // NOTE(review): URL.getPort() is -1 when the URL has no explicit port — confirm callers
    // always supply one, otherwise the Socket constructors below will reject it.
    final int port = requestUrl.getPort();
    switch (protocol) {
        case "https":
            try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                return doInputOutput(httpRequest, socket);
            }
        case "http":
            try (Socket socket = new Socket(host, port)) {
                return doInputOutput(httpRequest, socket);
            }
        default:
            throw LOGGER.logThrowableAsWarning(
                new ProtocolException("Only HTTP and HTTPS are supported by this client."));
    }
}
/**
 * Calls buildAndSend to send a String representation of the request across the output stream, then calls
 * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
 *
 * @param httpRequest The HTTP Request being sent
 * @param socket An instance of the SocketClient
 * @return an instance of HttpUrlConnectionResponse
 */
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
    throws IOException {
    httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
    if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
        httpRequest.setHeader(HeaderName.CONNECTION, "close");
    }
    // Explicit UTF-8 for the writer as well as the reader: the no-charset OutputStreamWriter
    // constructor encodes with the platform default, making request bytes machine-dependent.
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
        OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
        buildAndSend(httpRequest, out);
        DefaultHttpClientResponse response = buildResponse(httpRequest, in);
        Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
        String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
        if (redirectLocation != null) {
            // NOTE(review): redirects are followed unconditionally; a redirect cycle would
            // recurse until stack overflow — consider adding a hop limit.
            if (redirectLocation.startsWith("http")) {
                httpRequest.setUrl(redirectLocation);
            } else {
                httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
            }
            return sendPatchRequest(httpRequest);
        }
        return response;
    }
}
/**
* Converts an instance of HttpRequest to a String representation for sending over the output stream
*
* @param httpRequest The HTTP Request being sent
* @param out output stream for writing the request
* @throws IOException If an I/O error occurs
*/
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
final StringBuilder request = new StringBuilder();
request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
if (httpRequest.getHeaders().getSize() > 0) {
for (Header header : httpRequest.getHeaders()) {
header.getValuesList()
.forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
}
}
if (httpRequest.getBody() != null) {
request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
}
out.write(request.toString());
out.flush();
}
/**
* Reads the response from the input stream and extracts the information needed to construct an instance of
* HttpUrlConnectionResponse
*
* @param httpRequest The HTTP Request being sent
* @param reader the input stream from the socket
* @return an instance of HttpUrlConnectionResponse
* @throws IOException If an I/O error occurs
*/
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
throws IOException {
String statusLine = reader.readLine();
int dotIndex = statusLine.indexOf('.');
int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
Headers headers = new Headers();
String line;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
int split = line.indexOf(':');
String key = line.substring(0, split);
String value = line.substring(split + 1).trim();
headers.add(HeaderName.fromString(key), value);
}
StringBuilder bodyString = new StringBuilder();
while ((line = reader.readLine()) != null) {
bodyString.append(line);
}
BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
Create this array lazily only if the event has a data field. | private ServerSentEvent processLines(String[] lines) {
// Accumulated "data:" field values; allocated lazily so events without a
// data field never create the list.
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
    int idx = line.indexOf(':');
    if (idx == 0) {
        // A line starting with ':' is a comment per the SSE specification.
        event.setComment(line.substring(1).trim());
        continue;
    }
    // No colon: the whole line is the field name with an empty value.
    // (Previously this used lines.length - the number of lines in the event
    // block - as the end index, truncating or overrunning the field name.)
    String field = (idx < 0 ? line : line.substring(0, idx)).trim().toLowerCase();
    String value = idx < 0 ? "" : line.substring(idx + 1).trim();
    switch (field) {
        case "event":
            event.setEvent(value);
            break;
        case "data":
            if (eventData == null) {
                eventData = new ArrayList<>();
            }
            eventData.add(value);
            break;
        case "id":
            if (!value.isEmpty()) {
                event.setId(Long.parseLong(value));
            }
            break;
        case "retry":
            // Only a purely numeric retry value is honored.
            if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                event.setRetryAfter(Duration.ofMillis(Long.parseLong(value)));
            }
            break;
        default:
            throw new IllegalArgumentException("Invalid data received from server");
    }
}
// Default the event type when the stream did not name one.
event.setEvent(event.getEvent() == null ? DEFAULT_EVENT : event.getEvent());
if (eventData != null) {
    event.setData(eventData);
}
return event;
} | List<String> eventData = new ArrayList<>(); | private ServerSentEvent processLines(String[] lines) {
// "data:" values are collected here; the list is created lazily below so
// events without a data field never allocate it.
List<String> eventData = null;
ServerSentEvent event = new ServerSentEvent();
for (String line : lines) {
    int idx = line.indexOf(':');
    if (idx == 0) {
        // A line starting with ':' is a comment per the SSE specification.
        ServerSentEventHelper.setComment(event, line.substring(1).trim());
        continue;
    }
    // NOTE(review): for a colon-less line (idx < 0) this uses lines.length,
    // the number of lines in the block, as the end index - it looks like it
    // should be line.length(); confirm.
    String field = line.substring(0, idx < 0 ? lines.length : idx).trim().toLowerCase();
    String value = idx < 0 ? "" : line.substring(idx + 1).trim();
    switch (field) {
        case "event":
            ServerSentEventHelper.setEvent(event, value);
            break;
        case "data":
            // Lazy allocation: only events carrying data pay for the list.
            if(eventData == null) {
                eventData = new ArrayList<>();
            }
            eventData.add(value);
            break;
        case "id":
            if (!value.isEmpty()) {
                ServerSentEventHelper.setId(event, Long.parseLong(value));
            }
            break;
        case "retry":
            // Only a purely numeric retry value is honored.
            if (!value.isEmpty() && DIGITS_ONLY.matcher(value).matches()) {
                ServerSentEventHelper.setRetryAfter(event, Duration.ofMillis(Long.parseLong(value)));
            }
            break;
        default:
            throw new IllegalArgumentException("Invalid data received from server");
    }
}
// Default the event type when the stream did not name one.
if (event.getEvent() == null) {
    ServerSentEventHelper.setEvent(event, DEFAULT_EVENT);
}
if (eventData != null) {
    ServerSentEventHelper.setData(event, eventData);
}
return event;
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
// Timeouts in milliseconds; -1 means "not configured" (JDK defaults apply).
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
// Header used to resume an SSE stream from the last delivered event.
private static final String LAST_EVENT_ID = "Last-Event-Id";
// Default SSE event type per the specification.
private static final String DEFAULT_EVENT = "message";
// Pattern is immutable and thread-safe: compile it once and share it across
// instances instead of holding one per client (was a non-static field).
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
/**
 * Creates a client with optional timeouts and proxy configuration.
 *
 * @param connectionTimeout connect timeout, or null for none (stored as -1)
 * @param readTimeout read timeout, or null for none (stored as -1)
 * @param proxyOptions proxy configuration, may be null
 */
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
    if (connectionTimeout == null) {
        this.connectionTimeout = -1;
    } else {
        this.connectionTimeout = connectionTimeout.toMillis();
    }
    if (readTimeout == null) {
        this.readTimeout = -1;
    } else {
        this.readTimeout = readTimeout.toMillis();
    }
    this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
    // PATCH is handed to the raw socket client - presumably because
    // HttpURLConnection rejects the PATCH verb; confirm before changing.
    if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
        return sendPatchViaSocket(httpRequest);
    }
    // Everything else: open the connection, stream the body, read the reply.
    HttpURLConnection connection = connect(httpRequest);
    sendBody(httpRequest, connection);
    return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
    try {
        return SocketClient.sendPatchRequest(httpRequest);
    } catch (IOException e) {
        // Surface I/O failures as unchecked, logged at warning level.
        throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
    }
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    // Pre-emptive Basic proxy auth. Encode with an explicit
                    // charset: the no-arg getBytes() used the platform default
                    // and could corrupt non-ASCII credentials.
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    String authStringEnc
                        = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured"; leave the JDK defaults in place.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        // Copy every request header, preserving multi-valued headers.
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
/**
 * Streams the request body to the connection, if the HTTP method permits one.
 *
 * @param httpRequest the request whose body (possibly null) should be sent
 * @param connection the open connection to write to
 */
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            // GET and HEAD never carry a body; silently drop it.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            writeRequestBody(connection, body);
            return;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}

/** Writes the body bytes to the connection's output stream. */
private static void writeRequestBody(HttpURLConnection connection, BinaryData body) {
    connection.setDoOutput(true);
    try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
        body.writeTo(outputStream);
        outputStream.flush();
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();
        Headers responseHeaders = getResponseHeaders(connection);
        ServerSentEventListener listener = httpRequest.getServerSentEventListener();
        // Only successful (no error stream) text/event-stream responses are
        // consumed as SSE; the events go to the listener, not the body.
        if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
            if (listener != null) {
                processTextEventStream(httpRequest, connection, listener);
            } else {
                // NOTE(review): with no listener the stream body is never
                // read; the response below is returned without a body.
                LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
                    " http request. Treating response as regular response.");
            }
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
        } else {
            // Non-streaming: drain the error or input stream fully into memory.
            AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
            return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
                BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    } finally {
        connection.disconnect();
    }
}
/**
 * Reads a text/event-stream response, dispatching parsed events to the
 * listener and, after an I/O failure, retrying if the listener allows it.
 *
 * @param httpRequest the request that produced the stream
 * @param connection the open connection to read from
 * @param listener receiver for parsed server-sent events
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Use the Charset constant rather than the "UTF-8" string: it cannot
    // throw UnsupportedEncodingException and skips the charset-name lookup.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            // The stream failed mid-read; ask the listener whether to retry.
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/** Returns true when the response's Content-Type header is exactly text/event-stream. */
private boolean isTextEventStream(Headers responseHeaders) {
    Header contentType = responseHeaders.get(HeaderName.CONTENT_TYPE);
    return contentType != null && contentType.getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
    StringBuilder collectedData = new StringBuilder();
    ServerSentEvent event = null;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            collectedData.append(line).append("\n");
            // A blank line (hence "\n\n") terminates an SSE event block.
            // NOTE(review): isEndOfBlock rescans the whole buffer on every
            // line, quadratic in block size - fine for small events.
            if (isEndOfBlock(collectedData)) {
                event = processLines(collectedData.toString().split("\n"));
                // Skip empty events: default type and no data fields.
                if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
                    listener.onEvent(event);
                }
                collectedData = new StringBuilder();
            }
        }
        listener.onClose();
    } catch (IOException e) {
        // Stream broke mid-read: hand back what a retry needs - the last
        // delivered event id (or -1) and the server-suggested delay (or null).
        return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? event.getRetryAfter() : null);
    }
    // null means the stream ended normally; no retry required.
    return null;
}
/** True once the buffer holds a blank line ("\n\n"), i.e. a complete SSE event block. */
private boolean isEndOfBlock(StringBuilder sb) {
    return sb.indexOf("\n\n") != -1;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    // Give up when already interrupted or when the listener declines.
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    // Resume the stream from the last event that was delivered.
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders()
            .add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        // Honor the server-suggested backoff, if one was sent.
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe it; the previous
        // code swallowed the exception and silently cleared the flag.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
/** Copies the connection's response headers into a Headers collection. */
private Headers getResponseHeaders(HttpURLConnection connection) {
    Map<String, List<String>> rawHeaders = connection.getHeaderFields();
    Headers headers = new Headers(rawHeaders.size());
    // The status line is reported under a null key; skip it.
    rawHeaders.forEach((name, values) -> {
        if (name != null) {
            headers.add(HeaderName.fromString(name), values);
        }
    });
    return headers;
}
/**
 * Fully drains the connection's error stream (if present) or input stream
 * into an in-memory buffer.
 */
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
    AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
    // Prefer the error stream so 4xx/5xx bodies are captured too.
    try (InputStream errorStream = connection.getErrorStream();
        InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
        byte[] buffer = new byte[8192];
        int length;
        while ((length = inputStream.read(buffer)) != -1) {
            outputStream.write(buffer, 0, length);
        }
    }
    return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
/**
 * Carries what is needed to retry an SSE stream after an I/O failure: the
 * exception, the id of the last delivered event, and the server-suggested
 * retry delay (if any).
 */
private static class RetrySSEResult {
    private final IOException ioException;
    private final Duration retryAfter;
    private final long lastEventId;

    public RetrySSEResult(IOException cause, long lastEventId, Duration retryAfter) {
        this.ioException = cause;
        this.retryAfter = retryAfter;
        this.lastEventId = lastEventId;
    }

    public IOException getException() {
        return ioException;
    }

    public Duration getRetryAfter() {
        return retryAfter;
    }

    public long getLastEventId() {
        return lastEventId;
    }
}
/**
 * Minimal socket-based HTTP client handling only PATCH requests, which this
 * client does not route through HttpURLConnection.
 */
private static class SocketClient {
    private static final String HTTP_VERSION = " HTTP/1.1";
    private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();

    /**
     * Opens a socket connection, then writes the PATCH request across the connection and reads the response.
     *
     * @param httpRequest The HTTP Request being sent
     * @return an instance of HttpUrlConnectionResponse
     * @throws ProtocolException If the protocol is not HTTP or HTTPS
     * @throws IOException If an I/O error occurs
     */
    public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
        final URL requestUrl = httpRequest.getUrl();
        final String protocol = requestUrl.getProtocol();
        final String host = requestUrl.getHost();
        final int port = requestUrl.getPort();
        switch (protocol) {
            case "https":
                // try-with-resources closes the socket even on failure.
                try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            case "http":
                try (Socket socket = new Socket(host, port)) {
                    return doInputOutput(httpRequest, socket);
                }
            default:
                throw LOGGER.logThrowableAsWarning(
                    new ProtocolException("Only HTTP and HTTPS are supported by this client."));
        }
    }

    /**
     * Calls buildAndSend to send a String representation of the request across the output stream, then calls
     * buildResponse to get an instance of HttpUrlConnectionResponse from the input stream.
     *
     * @param httpRequest The HTTP Request being sent
     * @param socket An instance of the SocketClient
     * @return an instance of HttpUrlConnectionResponse
     */
    @SuppressWarnings("deprecation")
    private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
        throws IOException {
        // HTTP/1.1 requires a Host header; default to Connection: close
        // unless keep-alive was explicitly requested.
        httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
        if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
            httpRequest.setHeader(HeaderName.CONNECTION, "close");
        }
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
            OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
            buildAndSend(httpRequest, out);
            DefaultHttpClientResponse response = buildResponse(httpRequest, in);
            // NOTE(review): redirects recurse via sendPatchRequest with no
            // hop limit - a redirect loop would never terminate.
            Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
            String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
            if (redirectLocation != null) {
                if (redirectLocation.startsWith("http")) {
                    httpRequest.setUrl(redirectLocation);
                } else {
                    // Relative redirect: resolve against the original URL.
                    httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
                }
                return sendPatchRequest(httpRequest);
            }
            return response;
        }
    }

    /**
     * Converts an instance of HttpRequest to a String representation for sending over the output stream.
     * NOTE(review): no Content-Length is written; framing relies on the
     * Connection: close set in doInputOutput.
     *
     * @param httpRequest The HTTP Request being sent
     * @param out output stream for writing the request
     * @throws IOException If an I/O error occurs
     */
    private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
        final StringBuilder request = new StringBuilder();
        request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
        if (httpRequest.getHeaders().getSize() > 0) {
            for (Header header : httpRequest.getHeaders()) {
                header.getValuesList()
                    .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
            }
        }
        if (httpRequest.getBody() != null) {
            request.append("\r\n").append(httpRequest.getBody().toString()).append("\r\n");
        }
        out.write(request.toString());
        out.flush();
    }

    /**
     * Reads the response from the input stream and extracts the information needed to construct an instance of
     * HttpUrlConnectionResponse.
     *
     * @param httpRequest The HTTP Request being sent
     * @param reader the input stream from the socket
     * @return an instance of HttpUrlConnectionResponse
     * @throws IOException If an I/O error occurs
     */
    private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
        throws IOException {
        // Status code is parsed relative to the '.' of "HTTP/1.1".
        // NOTE(review): a null status line (closed connection) would NPE.
        String statusLine = reader.readLine();
        int dotIndex = statusLine.indexOf('.');
        int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
        // Headers run until the first blank line.
        Headers headers = new Headers();
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            int split = line.indexOf(':');
            String key = line.substring(0, split);
            String value = line.substring(split + 1).trim();
            headers.add(HeaderName.fromString(key), value);
        }
        // Body: line separators are dropped; getBytes() uses the platform
        // default charset - TODO consider StandardCharsets.UTF_8.
        StringBuilder bodyString = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            bodyString.append(line);
        }
        BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes()));
        return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
    }
}
} | class DefaultHttpClient implements HttpClient {
private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class);
private final long connectionTimeout;
private final long readTimeout;
private final ProxyOptions proxyOptions;
private static final String LAST_EVENT_ID = "Last-Event-Id";
private static final String DEFAULT_EVENT = "message";
private static final Pattern DIGITS_ONLY = Pattern.compile("^[\\d]*$");
DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) {
this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis();
this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis();
this.proxyOptions = proxyOptions;
}
/**
* Synchronously send the HttpRequest.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
@Override
public HttpResponse send(HttpRequest httpRequest) {
if (httpRequest.getHttpMethod() == HttpMethod.PATCH) {
return sendPatchViaSocket(httpRequest);
}
HttpURLConnection connection = connect(httpRequest);
sendBody(httpRequest, connection);
return receiveResponse(httpRequest, connection);
}
/**
* Synchronously sends a PATCH request via a socket client.
*
* @param httpRequest The HTTP request being sent
* @return The HttpResponse object
*/
private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) {
try {
return SocketClient.sendPatchRequest(httpRequest);
} catch (IOException e) {
throw LOGGER.logThrowableAsWarning(new UncheckedIOException(e));
}
}
/**
* Open a connection based on the HttpRequest URL
*
* <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is
* specified in the 'Authorization' header.</p>
*
* @param httpRequest The HTTP Request being sent
* @return The HttpURLConnection object
*/
private HttpURLConnection connect(HttpRequest httpRequest) {
    try {
        HttpURLConnection connection;
        URL url = httpRequest.getUrl();
        if (proxyOptions != null) {
            InetSocketAddress address = proxyOptions.getAddress();
            if (address != null) {
                Proxy proxy = new Proxy(Proxy.Type.HTTP, address);
                connection = (HttpURLConnection) url.openConnection(proxy);
                if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
                    // Pre-emptive Basic proxy auth. Encode with an explicit
                    // charset: the no-arg getBytes() used the platform default
                    // and could corrupt non-ASCII credentials.
                    String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword();
                    String authStringEnc
                        = Base64.getEncoder().encodeToString(authString.getBytes(StandardCharsets.UTF_8));
                    connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc);
                }
            } else {
                throw LOGGER.logThrowableAsWarning(new ConnectException("Invalid proxy address"));
            }
        } else {
            connection = (HttpURLConnection) url.openConnection();
        }
        // -1 means "not configured"; leave the JDK defaults in place.
        if (connectionTimeout != -1) {
            connection.setConnectTimeout((int) connectionTimeout);
        }
        if (readTimeout != -1) {
            connection.setReadTimeout((int) readTimeout);
        }
        try {
            connection.setRequestMethod(httpRequest.getHttpMethod().toString());
        } catch (ProtocolException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
        // Copy every request header, preserving multi-valued headers.
        for (Header header : httpRequest.getHeaders()) {
            for (String value : header.getValues()) {
                connection.addRequestProperty(header.getName(), value);
            }
        }
        return connection;
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
/**
* Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection that is being sent to
*/
private void sendBody(HttpRequest httpRequest, HttpURLConnection connection) {
BinaryData body = httpRequest.getBody();
if (body == null) {
return;
}
HttpMethod method = httpRequest.getHttpMethod();
switch (httpRequest.getHttpMethod()) {
case GET:
case HEAD:
return;
case OPTIONS:
case TRACE:
case CONNECT:
case POST:
case PUT:
case DELETE:
connection.setDoOutput(true);
try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
body.writeTo(os);
os.flush();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return;
default:
throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
}
}
/**
* Receive the response from the remote server
*
* @param httpRequest The HTTP Request being sent
* @param connection The HttpURLConnection being sent to
* @return A HttpResponse object
*/
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
try {
int responseCode = connection.getResponseCode();
Headers responseHeaders = getResponseHeaders(connection);
ServerSentEventListener listener = httpRequest.getServerSentEventListener();
if (connection.getErrorStream() == null && isTextEventStream(responseHeaders)) {
if (listener != null) {
processTextEventStream(httpRequest, connection, listener);
} else {
LOGGER.log(ClientLogger.LogLevel.INFORMATIONAL, () -> "No listener attached to the server sent event" +
" http request. Treating response as regular response.");
}
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders);
} else {
AccessibleByteArrayOutputStream outputStream = getAccessibleByteArrayOutputStream(connection);
return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
}
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
} finally {
connection.disconnect();
}
}
/**
 * Reads a text/event-stream response, dispatching parsed events to the
 * listener and, after an I/O failure, retrying if the listener allows it.
 *
 * @param httpRequest the request that produced the stream
 * @param connection the open connection to read from
 * @param listener receiver for parsed server-sent events
 */
private void processTextEventStream(HttpRequest httpRequest, HttpURLConnection connection, ServerSentEventListener listener) {
    RetrySSEResult retrySSEResult;
    // Use the Charset constant rather than the "UTF-8" string: it cannot
    // throw UnsupportedEncodingException and skips the charset-name lookup.
    try (BufferedReader reader
        = new BufferedReader(new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        retrySSEResult = processBuffer(reader, listener);
        if (retrySSEResult != null) {
            // The stream failed mid-read; ask the listener whether to retry.
            retryExceptionForSSE(retrySSEResult, listener, httpRequest);
        }
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
    }
}
private boolean isTextEventStream(Headers responseHeaders) {
return responseHeaders.get(HeaderName.CONTENT_TYPE) != null &&
responseHeaders.get(HeaderName.CONTENT_TYPE).getValue().equals(ContentType.TEXT_EVENT_STREAM);
}
/**
* Processes the sse buffer and dispatches the event
*
* @param reader The BufferedReader object
* @param listener The listener object attached with the httpRequest
*/
private RetrySSEResult processBuffer(BufferedReader reader, ServerSentEventListener listener) {
StringBuilder collectedData = new StringBuilder();
ServerSentEvent event = null;
try {
String line;
while ((line = reader.readLine()) != null) {
collectedData.append(line).append("\n");
if (isEndOfBlock(collectedData)) {
event = processLines(collectedData.toString().split("\n"));
if (!Objects.equals(event.getEvent(), DEFAULT_EVENT) || event.getData() != null) {
listener.onEvent(event);
}
collectedData = new StringBuilder();
}
}
listener.onClose();
} catch (IOException e) {
return new RetrySSEResult(e, event != null ? event.getId() : -1, event != null ? ServerSentEventHelper.getRetryAfter(event) : null);
}
return null;
}
private boolean isEndOfBlock(StringBuilder sb) {
return sb.indexOf("\n\n") >= 0;
}
/**
* Retries the request if the listener allows it
*
* @param retrySSEResult the result of the retry
* @param listener The listener object attached with the httpRequest
* @param httpRequest the HTTP Request being sent
*/
private void retryExceptionForSSE(RetrySSEResult retrySSEResult, ServerSentEventListener listener, HttpRequest httpRequest) {
    // Give up when already interrupted or when the listener declines.
    if (Thread.currentThread().isInterrupted()
        || !listener.shouldRetry(retrySSEResult.getException(), retrySSEResult.getRetryAfter(),
            retrySSEResult.getLastEventId())) {
        listener.onError(retrySSEResult.getException());
        return;
    }
    // Resume the stream from the last event that was delivered.
    if (retrySSEResult.getLastEventId() != -1) {
        httpRequest.getHeaders()
            .add(HeaderName.fromString(LAST_EVENT_ID), String.valueOf(retrySSEResult.getLastEventId()));
    }
    try {
        // Honor the server-suggested backoff, if one was sent.
        if (retrySSEResult.getRetryAfter() != null) {
            Thread.sleep(retrySSEResult.getRetryAfter().toMillis());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe it; the previous
        // code swallowed the exception and silently cleared the flag.
        Thread.currentThread().interrupt();
        return;
    }
    if (!Thread.currentThread().isInterrupted()) {
        this.send(httpRequest);
    }
}
private Headers getResponseHeaders(HttpURLConnection connection) {
Map<String, List<String>> hucHeaders = connection.getHeaderFields();
Headers responseHeaders = new Headers(hucHeaders.size());
for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
if (entry.getKey() != null) {
responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
}
}
return responseHeaders;
}
private static AccessibleByteArrayOutputStream getAccessibleByteArrayOutputStream(HttpURLConnection connection) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
try (InputStream errorStream = connection.getErrorStream();
InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
byte[] buffer = new byte[8192];
int length;
while ((length = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, length);
}
}
return outputStream;
}
/**
* Inner class to hold the result for a retry of an SSE request
*/
private static class RetrySSEResult {
private final long lastEventId;
private final Duration retryAfter;
private final IOException ioException;
public RetrySSEResult(IOException e, long lastEventId, Duration retryAfter) {
this.ioException = e;
this.lastEventId = lastEventId;
this.retryAfter = retryAfter;
}
public long getLastEventId() {
return lastEventId;
}
public Duration getRetryAfter() {
return retryAfter;
}
public IOException getException() {
return ioException;
}
}
private static class SocketClient {
private static final String HTTP_VERSION = " HTTP/1.1";
private static final SSLSocketFactory SSL_SOCKET_FACTORY = (SSLSocketFactory) SSLSocketFactory.getDefault();
/**
* Opens a socket connection, then writes the PATCH request across the connection and reads the response
*
* @param httpRequest The HTTP Request being sent
* @return an instance of HttpUrlConnectionResponse
* @throws ProtocolException If the protocol is not HTTP or HTTPS
* @throws IOException If an I/O error occurs
*/
public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException {
final URL requestUrl = httpRequest.getUrl();
final String protocol = requestUrl.getProtocol();
final String host = requestUrl.getHost();
final int port = requestUrl.getPort();
switch (protocol) {
case "https":
try (SSLSocket socket = (SSLSocket) SSL_SOCKET_FACTORY.createSocket(host, port)) {
return doInputOutput(httpRequest, socket);
}
case "http":
try (Socket socket = new Socket(host, port)) {
return doInputOutput(httpRequest, socket);
}
default:
throw LOGGER.logThrowableAsWarning(
new ProtocolException("Only HTTP and HTTPS are supported by this client."));
}
}
/**
* Calls buildAndSend to send a String representation of the request across the output stream, then calls
* buildResponse to get an instance of HttpUrlConnectionResponse from the input stream
*
* @param httpRequest The HTTP Request being sent
* @param socket An instance of the SocketClient
* @return an instance of HttpUrlConnectionResponse
*/
@SuppressWarnings("deprecation")
private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket)
throws IOException {
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost());
if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) {
httpRequest.setHeader(HeaderName.CONNECTION, "close");
}
try (BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) {
buildAndSend(httpRequest, out);
DefaultHttpClientResponse response = buildResponse(httpRequest, in);
Header locationHeader = response.getHeaders().get(HeaderName.LOCATION);
String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue();
if (redirectLocation != null) {
if (redirectLocation.startsWith("http")) {
httpRequest.setUrl(redirectLocation);
} else {
httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation));
}
return sendPatchRequest(httpRequest);
}
return response;
}
}
/**
 * Serializes the request (request line, headers, optional body) into HTTP/1.1 wire format
 * and writes it to the output stream.
 *
 * NOTE(review): only {@code getUrl().getPath()} is written — any query string on the
 * request URL is silently dropped; confirm whether callers rely on query parameters.
 *
 * @param httpRequest The HTTP request being sent
 * @param out output stream for writing the request
 * @throws IOException If an I/O error occurs
 */
private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException {
    final StringBuilder request = new StringBuilder();
    request.append("PATCH ").append(httpRequest.getUrl().getPath()).append(HTTP_VERSION).append("\r\n");
    if (httpRequest.getHeaders().getSize() > 0) {
        for (Header header : httpRequest.getHeaders()) {
            header.getValuesList()
                .forEach(value -> request.append(header.getName()).append(':').append(value).append("\r\n"));
        }
    }
    // Fix: the header section must ALWAYS be terminated by an empty line (CRLF CRLF),
    // even when there is no body. Previously the blank line was only emitted together
    // with the body, so body-less requests were malformed and servers would hang or
    // reject them.
    request.append("\r\n");
    if (httpRequest.getBody() != null) {
        request.append(httpRequest.getBody().toString()).append("\r\n");
    }
    out.write(request.toString());
    out.flush();
}
/**
 * Reads the response from the input stream and extracts the status code, headers and body
 * needed to construct a {@code DefaultHttpClientResponse}.
 *
 * @param httpRequest The HTTP request that produced this response
 * @param reader the input stream from the socket
 * @return the parsed response
 * @throws IOException If an I/O error occurs or the connection closes prematurely
 */
private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader)
    throws IOException {
    String statusLine = reader.readLine();
    // Fix: guard against the peer closing the connection before sending a status line,
    // which previously surfaced as a NullPointerException instead of an IOException.
    if (statusLine == null) {
        throw new IOException("Connection closed before an HTTP status line was received.");
    }
    // Status line looks like "HTTP/1.1 200 OK": the three-digit status code starts three
    // characters after the version's dot.
    int dotIndex = statusLine.indexOf('.');
    int statusCode = Integer.parseInt(statusLine.substring(dotIndex + 3, dotIndex + 6));
    Headers headers = new Headers();
    String line;
    // Header section ends at the first empty line.
    // NOTE(review): a malformed header line without ':' would make indexOf return -1 and
    // substring throw — acceptable only if the server is trusted; confirm.
    while ((line = reader.readLine()) != null && !line.isEmpty()) {
        int split = line.indexOf(':');
        String key = line.substring(0, split);
        String value = line.substring(split + 1).trim();
        headers.add(HeaderName.fromString(key), value);
    }
    // NOTE(review): line terminators are dropped when reassembling the body — fine for
    // single-line JSON payloads, but multi-line bodies lose their newlines; confirm.
    StringBuilder bodyString = new StringBuilder();
    while ((line = reader.readLine()) != null) {
        bodyString.append(line);
    }
    // Fix: encode with an explicit charset. The no-arg getBytes() uses the platform
    // default; the stream was decoded as UTF-8 above, so re-encode as UTF-8.
    BinaryData body
        = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes(StandardCharsets.UTF_8)));
    return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body);
}
}
} |
/**
 * Completes client construction: wires the gateway proxy, caches, telemetry and the
 * direct/gateway store model. Must be called exactly once after the constructor.
 *
 * Fix (addresses the inline review note "remove the protocol from here since removed in
 * the log.info?"): configs.getProtocol() was removed from the logger.info arguments —
 * the message template only has five '{}' placeholders, so the sixth argument was
 * silently ignored by SLF4J.
 *
 * @param metadataCachesSnapshot optional warm-start snapshot for the collection caches
 * @param httpClientInterceptor optional wrapper applied to the gateway HTTP client
 */
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
    try {
        this.httpClientInterceptor = httpClientInterceptor;
        if (httpClientInterceptor != null) {
            this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
        }
        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient,
            this.apiType);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();
        // Warm-start the collection cache from the snapshot when one was provided.
        if (metadataCachesSnapshot != null) {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy,
                metadataCachesSnapshot.getCollectionInfoByNameCache(),
                metadataCachesSnapshot.getCollectionInfoByIdCache()
            );
        } else {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy);
        }
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
            collectionCache);
        updateGatewayProxy();
        clientTelemetry = new ClientTelemetry(
            this,
            null,
            randomUuid().toString(),
            ManagementFactory.getRuntimeMXBean().getName(),
            connectionPolicy.getConnectionMode(),
            globalEndpointManager.getLatestDatabaseAccount().getId(),
            null,
            null,
            this.configs,
            this.clientTelemetryConfig,
            this,
            this.connectionPolicy.getPreferredRegions());
        clientTelemetry.init().thenEmpty((publisher) -> {
            logger.info(
                "Initialized DocumentClient [{}] with machineId[{}]"
                    + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
                clientId,
                ClientTelemetry.getMachineId(diagnosticsClientConfig),
                serviceEndpoint,
                connectionPolicy,
                consistencyLevel);
        }).subscribe();
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }
        this.retryPolicy.setRxCollectionCache(this.collectionCache);
        // Session capturing is only needed for SESSION consistency (unless overridden);
        // re-evaluate now that the account's default consistency is known.
        ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
            ? consistencyLevel
            : this.getDefaultConsistencyLevelOfAccount();
        boolean updatedDisableSessionCapturing =
            (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
    } catch (Exception e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
// (duplicate extraction copy of init() continues below)
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel);
}).subscribe();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor without a TokenCredential: delegates to the private main constructor
 * with a {@code null} token credential, then records the authorization-token resolver.
 * NOTE(review): the resolver is assigned after delegation, so base construction runs
 * without it — confirm nothing in the delegate chain reads it during construction.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            CosmosClientTelemetryConfig clientTelemetryConfig,
                            String clientCorrelationId,
                            CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                            SessionRetryOptions sessionRetryOptions,
                            CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        null, // this overload carries no TokenCredential
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor with a TokenCredential: delegates to the private main constructor,
 * then records the authorization-token resolver (same post-delegation caveat as the
 * overload above — confirm the resolver is not needed during base construction).
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            CosmosClientTelemetryConfig clientTelemetryConfig,
                            String clientCorrelationId,
                            CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                            SessionRetryOptions sessionRetryOptions,
                            CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor handling a permission feed: delegates to the main constructor, then
 * builds the resource-token map keyed by resource id/full name.
 *
 * Improvements: {@code permissionFeed.size() > 0} replaced with {@code !isEmpty()}, and
 * the get / null-check / put sequence replaced with {@code Map.computeIfAbsent}.
 *
 * @throws IllegalArgumentException when a permission has an unparseable resource link or
 *         the resulting token map is empty
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the previous get / null-check / put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            // NOTE(review): this logs the raw resource token at DEBUG level — tokens are
            // credentials; consider redacting before enabling debug logging in production.
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first resource token as the fallback credential.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
@Override
public DiagnosticsClientConfig getConfig() {
    // Diagnostics configuration assembled during construction.
    return diagnosticsClientConfig;
}
@Override
public CosmosDiagnostics createDiagnostics() {
    // Create a diagnostics instance at this client's telemetry sampling rate and remember
    // it so getMostRecentlyCreatedDiagnostics() can return the latest one handed out.
    final CosmosDiagnostics created =
        diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    this.mostRecentlyCreatedDiagnostics.set(created);
    return created;
}
// Reads the account-level service configuration via the global endpoint manager and fails
// fast with a descriptive error when the database account could not be fetched.
// NOTE(review): several string literals below are truncated in this copy of the file
// (text after "//", e.g. "https://..." links, was stripped by extraction) — they are
// reproduced unchanged here.
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    // Account fetch failed: surface the cached refresh error when one exists.
    if (databaseAccount == null) {
        Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
        if (databaseRefreshErrorSnapshot != null) {
            logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
                + "is valid. More info: https:
                databaseRefreshErrorSnapshot
            );
            throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
                + "is valid. More info: https:
                databaseRefreshErrorSnapshot);
        } else {
            logger.error("Client initialization failed."
                + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
            throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
                + "is valid. More info: https:
        }
    }
    // Multi-write is only effective when enabled both on the policy and on the account.
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
private void updateGatewayProxy() {
    // Wire the collaborators into the gateway store model; they are created after the
    // proxy itself during init(), hence this late hookup.
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Snapshots this client's collection cache into the given metadata-caches state object,
// enabling warm-start of a future client via init(metadataCachesSnapshot, ...).
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
// Builds the direct-connectivity stack: address resolver -> store client factory -> store
// model. Invoked from init() only when the connection mode is not GATEWAY.
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        // NOTE(review): purpose of this null argument is not visible here — confirm
        // against GlobalAddressResolver's constructor signature.
        null,
        this.connectionPolicy,
        this.apiType);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager);
    this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface consumed by the
// GlobalEndpointManager; all calls delegate back to this instance.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
// Factory for the gateway store model. Package-visible — presumably overridden by tests
// to inject a fake transport; confirm before changing visibility.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
private HttpClient httpClient() {
    // Gateway HTTP client configured from the connection policy (timeouts, pool, proxy).
    final HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its configuration for diagnostics before creating it.
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }
    // Sharing enabled: reuse the JVM-wide gateway HTTP client.
    return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
}
// Creates the direct-mode ServerStoreModel over a freshly built StoreClient.
// NOTE(review): the subscribeRntbdStatus parameter is never read in this body — confirm
// whether it is still needed by callers or can be retired.
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);
    this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
    // Account endpoint supplied at construction.
    return this.serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
    // Effective policy (falls back to a direct-mode default in the constructor).
    return this.connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
    // Whether write operations return the resource payload in the response.
    return contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
    // May be null, meaning "use the account's default consistency".
    return consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
    // Populated during init(); null before initialization completes.
    return this.clientTelemetry;
}
@Override
public String getClientCorrelationId() {
    // Caller-supplied id, or the zero-padded client id when none was given.
    return this.clientCorrelationId;
}
@Override
public String getMachineId() {
    // The machine id lives on the diagnostics config; without one there is no id.
    return this.diagnosticsClientConfig == null
        ? null
        : ClientTelemetry.getMachineId(diagnosticsClientConfig);
}
@Override
public String getUserAgent() {
    // Full user-agent string, including any caller-configured suffix.
    return this.userAgentContainer.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
    // Last instance handed out by createDiagnostics(); null before the first call.
    return mostRecentlyCreatedDiagnostics.get();
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    // One retry-policy instance per logical operation, shared across all attempts.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
// Single attempt of a database create: serializes the resource (timing the serialization
// for diagnostics), builds the service request, and maps the store response.
// Synchronous argument failures are returned as Mono.error, never thrown.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the JSON serialization so it can be attached to the request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        // Give the retry policy a chance to stamp per-request state before sending.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // One retry-policy instance per logical operation, shared across all attempts.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads a database by link, wrapping the internal call in a session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the Read request for a database link and maps the response.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null); // normalizes the link into a request path
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        // Let the retry policy observe/augment the request before the first send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads the database feed (all databases under the account root) as a paged Flux.
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link plus a resource type to the link used for querying that type.
// Database and Offer queries target fixed root paths; every other supported type is a child
// feed under the supplied parent link. Unsupported types are rejected.
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childPathSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childPathSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childPathSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childPathSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childPathSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childPathSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childPathSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childPathSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childPathSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childPathSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childPathSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childPathSegment);
}
// Extracts the operation context/listener pair carried on query request options; null when
// no options were supplied.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}

// Extracts the operation context/listener pair carried on point-operation request options;
// null when no options were supplied.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: runs the query with this client as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}

// Sets up and executes a cross-partition query: resolves the query link for the resource
// type, picks/creates a correlation activity id, wires an invalid-partition retry policy
// backed by the collection cache, and wraps the execution in a ScopedDiagnosticsFactory so
// diagnostics are merged into the operation state on completion, error, or cancellation.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Honor a caller-provided correlation activity id; otherwise mint a fresh one.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    // Shared flag so a timeout can mark the query as cancelled for diagnostics purposes.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            // Merge accumulated diagnostics into the state's snapshot on every emitted page.
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            // Also merge on failure so no diagnostics are lost with the error.
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Builds the query execution context (pipeline) and executes it. Select-value query info is
// attached to every page; the query-plan diagnostics are attached only to the first page.
// When an end-to-end latency policy is enabled, the response flux is wrapped with a timeout.
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
    // Tracks whether the next emitted page is the first, to attach plan diagnostics once.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        // Only pipelined contexts expose QueryInfo (select value / plan diagnostics).
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // compareAndSet ensures the plan diagnostics land on exactly one page.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsClientContext);
        }
        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
// Attaches the best-available diagnostics to a query exception. Prefers the most recently
// created diagnostics on the client context; otherwise merges the client-side request
// statistics of all diagnostics recorded for requests cancelled by the timeout and attaches
// the aggregate.
//
// Fix: the reduction previously read the "to be merged" statistics from `first` instead of
// `toBeMerged`, so each step merged `first`'s statistics into themselves and every other
// cancelled request's statistics were silently dropped from the aggregate.
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {
    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();
    if (mostRecentlyCreatedDiagnostics != null) {
        // A query request has been attempted most recently - using the diagnostics of that
        // request as the exception's diagnostics.
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        // No recent request - fall back to the diagnostics tracked for cancelled requests.
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);
        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregratedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);
                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get(); // safe: the list was checked non-empty above
            BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
        }
    }
}
// Applies the end-to-end operation timeout to a query's response flux. On timeout the flux's
// TimeoutException is mapped to a CosmosException (with the original stack trace preserved),
// the query is flagged as cancelled-on-timeout, and merged diagnostics are attached.
// A negative configured timeout takes the first branch and is mapped to a dedicated
// negative-timeout exception instead of a generic OperationCancelledException.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout,
    DiagnosticsClientContext diagnosticsClientContext) {
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        // NOTE(review): presumably Flux.timeout with a negative duration fires immediately,
        // cancelling the operation up front - confirm against Reactor's timeout semantics.
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnosticsForQuery(
                        requestOptions, cancellationException, diagnosticsClientContext);
                    return cancellationException;
                }
                return throwable;
            });
    }
    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
                return exception;
            }
            return throwable;
        });
}
// Queries databases with a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return queryDatabases(new SqlQuerySpec(query), state);
}

// Queries databases under the account root with a parameterized query spec.
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
// Creates a collection under the given database, wrapping the internal call in a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}

// Validates and serializes the collection, issues the Create request under the database's
// collections feed, records serialization diagnostics, and - on success - captures the
// response's session token into the session container keyed by the new resource.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection); // rejects ids with illegal characters or trailing spaces
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the JSON serialization so it can be attached to the request diagnostics below.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        // Let the retry policy observe/augment the request before the first send.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Record the session token for the freshly created collection so subsequent
                // session-consistent reads can use it.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Replaces a collection's definition, wrapping the internal call in a session-token-reset
// retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}

// Validates and serializes the replacement collection, issues the Replace request against the
// collection's self link, records serialization diagnostics, and captures the session token
// from the response when a resource body is present.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection); // rejects ids with illegal characters or trailing spaces
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the JSON serialization so it can be attached to the request diagnostics below.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        // Let the retry policy observe/augment the request before the first send.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Unlike create, the replace path guards against a null resource body before
                // touching the session container.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Deletes a collection by link, wrapping the internal call in a session-token-reset retry
// policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the Delete request for a collection link and maps the response.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null); // normalizes the link into a request path
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        // Let the retry policy observe/augment the request before the first send.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Stamps the retry-context end time on the given policy, but only when at least one retry
// has already happened (retry count > 0).
private static void updateRetryContextEndTimeIfRetried(DocumentClientRetryPolicy policy) {
    if (policy.getRetryContext() != null && policy.getRetryContext().getRetryCount() > 0) {
        policy.getRetryContext().updateEndTime();
    }
}

// Populates DELETE headers, records the retry end time when applicable, and routes the
// request through the store proxy together with the operation context/listener pair.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            updateRetryContextEndTimeIfRetried(documentClientRetryPolicy);
            return getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}

// Populates POST headers for a delete-by-partition-key operation, records the retry end time
// when applicable, and sends the request through the store proxy.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            updateRetryContextEndTimeIfRetried(documentClientRetryPolicy);
            return storeProxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}

// Populates GET headers, records the retry end time when applicable, and sends the request
// through the store proxy.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            updateRetryContextEndTimeIfRetried(documentClientRetryPolicy);
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}

// Populates GET headers for a feed read and sends the request through the store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> getStoreProxy(populatedRequest).processMessage(populatedRequest));
}

// Populates POST headers for a query, sends it through the store proxy, and captures the
// response's session token before passing the response along.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest ->
            this.getStoreProxy(populatedRequest).processMessage(populatedRequest)
                .doOnNext(response -> this.captureSessionToken(populatedRequest, response)));
}
// Reads a collection by link, wrapping the internal call in a session-token-reset retry
// policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the Read request for a collection link and maps the response.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null); // normalizes the link into a request path
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        // Let the retry policy observe/augment the request before the first send.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads the collections feed under a database link as a paged Flux.
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}

// Queries collections under a database with a raw query string; delegates to the
// SqlQuerySpec overload via createQuery.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               QueryFeedOperationState state) {
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}

// Queries collections under a database with a parameterized query spec.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Serializes stored-procedure arguments into a single JSON array literal (e.g. [1,"a",{...}]).
// JsonSerializable values use the SDK's own serializer; everything else goes through the
// shared Jackson mapper. A non-serializable argument is rejected with IllegalArgumentException.
private static String serializeProcedureParams(List<Object> objectArray) {
    String[] stringArray = new String[objectArray.size()];
    int index = 0;
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            stringArray[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                stringArray[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
        ++index;
    }
    return String.format("[%s]", StringUtils.join(stringArray, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Assembles the HTTP request headers for a point operation from client-level defaults and the
// per-request options. Client-level settings (tentative writes, consistency, minimal content
// response) are applied first; when options are present, custom headers are copied in and then
// each supported option is mapped onto its wire header. Returns a mutable header map.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    // Client-level defaults, applied regardless of per-request options.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-wide content-response preference applies.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Caller-supplied custom headers go in first so the typed options below take precedence.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response preference overrides the client-level default.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency preconditions.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Pre/post triggers are sent as comma-separated lists.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Throughput: an explicit offer throughput wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties is only consulted when no explicit offer throughput was set.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    // Dedicated-gateway integrated-cache controls.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
// Exposes the client's session-token-reset retry policy factory.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection from the collection cache, then attaches partition-key
// information to the request (see the synchronous overload below).
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}

// Variant that takes an already-resolved (async) collection instead of hitting the cache.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
        return request;
    });
}

// Determines the effective partition key for a request and stamps it onto the request
// (both as PartitionKeyInternal and as the x-ms-documentdb-partitionkey header).
// Resolution order: explicit PartitionKey.NONE in options -> "none" key; explicit key in
// options -> that key; collection without a partition-key definition -> empty key;
// otherwise the key is extracted from the document body (timed for serialization
// diagnostics). If none of these apply, the caller must supply a key and an
// UnsupportedOperationException is thrown.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Migration/legacy: collection has no partition-key definition.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the key from the document body, preferring an in-memory object over
        // re-parsing the serialized buffer.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            contentAsByteBuffer.rewind(); // buffer may already have been read during serialization
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the key extraction so it can be surfaced in request diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the document service request used by both Create and Upsert point operations.
 * <p>
 * Serializes {@code document} to JSON (recording serialization diagnostics), creates the
 * {@link RxDocumentServiceRequest}, applies per-request options (non-idempotent write retries,
 * end-to-end timeout cancellation hook, excluded regions), notifies the retry policy, resolves
 * the target collection and finally attaches partition-key routing information.
 *
 * @param requestRetryPolicy retry policy notified via onBeforeSendRequest; may be null
 * @param documentCollectionLink link of the target collection; must be non-empty
 * @param document the document payload; must not be null
 * @param options request options; may be null
 * @param disableAutomaticIdGeneration not referenced in this method (id handling happens in the caller/serializer)
 * @param operationType Create or Upsert
 * @param clientContextOverride diagnostics client context override for this call
 * @return a Mono emitting the fully prepared request once the collection is resolved
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType,
                                                                DiagnosticsClientContext clientContextOverride) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the payload serialization so it can be surfaced in CosmosDiagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        // Hook allows the E2E-timeout machinery to mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Collection resolution is async; partition-key headers are added once it completes.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the document service request for a transactional batch operation.
 * <p>
 * Wraps the pre-serialized batch body (recording serialization diagnostics), applies request
 * options, notifies the retry policy, resolves the target collection and attaches the
 * batch-specific routing/validation headers.
 *
 * @param requestRetryPolicy retry policy notified via onBeforeSendRequest; may be null
 * @param documentCollectionLink link of the target collection; must be non-empty
 * @param serverBatchRequest the serialized batch request; must not be null
 * @param options request options; may be null
 * @param disableAutomaticIdGeneration not referenced in this method
 * @return a Mono emitting the prepared request once the collection is resolved
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; only the UTF-8 wrapping is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Hook allows the E2E-timeout machinery to mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // NOTE: a second, redundant `if (options != null) setExcludeRegions(...)` block was removed
    // here — the exclude-regions list is already applied in the options block above.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches partition routing and batch control headers to a batch request.
 * Single-partition-key batches are routed by the (possibly NONE) partition key;
 * partition-key-range batches are routed by range identity.
 *
 * @throws UnsupportedOperationException for unrecognized ServerBatchRequest subtypes
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey singlePartitionKey =
            ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();

        PartitionKeyInternal effectivePartitionKey;
        if (singlePartitionKey.equals(PartitionKey.NONE)) {
            // PartitionKey.NONE must be translated using the collection's PK definition.
            effectivePartitionKey =
                ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey());
        } else {
            effectivePartitionKey = BridgeInternal.getPartitionKeyInternal(singlePartitionKey);
        }

        request.setPartitionKeyInternal(effectivePartitionKey);
        request.getHeaders().put(
            HttpConstants.HttpHeaders.PARTITION_KEY,
            Utils.escapeNonAscii(effectivePartitionKey.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String targetRangeId =
            ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(targetRangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * <p>
 * Populates the date, authorization, API-type, capabilities, content-type and accept headers,
 * and — for document/conflict feed reads and queries with a feed range — the feed-range
 * filtering headers (which require async collection/partition-key-range resolution).
 *
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        // Key/resource-token auth is computed synchronously here; AAD tokens are attached
        // later by populateAuthorizationHeader.
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is sent URL-encoded.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }
    this.populateCapabilitiesHeader(request);
    // Default content types: JSON for POST/PUT bodies, JSON-patch for PATCH.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range headers need the collection and PK-range caches, hence the async path.
        return request.getFeedRange()
                      .populateFeedRangeFilteringHeaders(
                          this.getPartitionKeyRangeCache(),
                          request,
                          this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
                      .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}
/** Advertises the SDK's supported capabilities unless the header was already set. */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Returns true when this request must carry feed-range filtering headers:
 * only document/conflict feed-reads and queries that actually specify a feed range.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeCapableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeCapableResource) {
        return false;
    }

    OperationType operationType = request.getOperationType();
    boolean isFeedOrQueryOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;

    return isFeedOrQueryOperation && request.getFeedRange() != null;
}
/**
 * Attaches an AAD bearer token to the request when AAD auth is configured;
 * otherwise the request is passed through unchanged (other token types are
 * applied synchronously elsewhere).
 *
 * @throws IllegalArgumentException when request is null
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(aadToken -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, aadToken);
            return request;
        });
}
/**
 * Attaches an AAD bearer token to a raw header set when AAD auth is configured;
 * otherwise the headers are passed through unchanged.
 *
 * @throws IllegalArgumentException when httpHeaders is null
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(aadToken -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, aadToken);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client was configured with (e.g. AadToken or PrimaryMasterKey). */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Produces the authorization token for a request, honoring the configured auth sources
 * in priority order: custom token resolver, then key credential, then a single resource
 * token, then the resource-tokens map.
 *
 * @param resourceName address of the target resource
 * @param resourceType type of the target resource
 * @param requestVerb HTTP verb of the request
 * @param headers request headers (used for signature generation)
 * @param tokenType requested token type (not consulted by the resolver/credential paths)
 * @param properties caller-supplied properties forwarded (read-only) to a custom resolver
 * @return the raw (not yet URL-encoded) authorization token
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
        // Custom resolver takes precedence over every built-in auth mechanism.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level reads use the first token obtained from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps the service-serialized resource type onto the public enum;
 * unrecognized values fall back to SYSTEM.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from a response so later session-consistent
// operations on this client can read their own writes.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Executes a Create as a POST: populates headers (possibly asynchronously),
 * updates the retry context end time on retries, then dispatches via the store model.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeModel = this.getStoreProxy(populatedRequest);
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeModel.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Executes an Upsert: a POST carrying the IS_UPSERT header. The response's session
 * token is captured so subsequent session-consistent reads observe the write.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> populatedHeaders = populatedRequest.getHeaders();
            assert (populatedHeaders != null);
            populatedHeaders.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(storeResponse -> {
                    this.captureSessionToken(populatedRequest, storeResponse);
                    return storeResponse;
                });
        });
}
/** Executes a Replace as a PUT, updating the retry context end time on retried attempts. */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/** Executes a Patch as a PATCH verb, updating the retry context end time on retried attempts. */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Public Create entry point; wraps the core implementation with the
 * availability (multi-region hedging) strategy.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (effectiveOptions, effectiveE2EConfig, effectiveClientCtx) ->
            createDocumentCore(
                collectionLink,
                document,
                effectiveOptions,
                disableAutomaticIdGeneration,
                effectiveE2EConfig,
                effectiveClientCtx),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core Create path: layers the session-token reset retry policy (plus a
 * partition-key-mismatch retry policy when no PK was supplied), then wraps the
 * whole pipeline with the end-to-end timeout handling.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Scoped factory collects diagnostics across retries of this one logical operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without an explicit PK the collection may have been recreated with a new PK
        // definition; this policy retries after refreshing the collection cache.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
                createDocumentInternal(
                    collectionLink,
                    document,
                    nonNullRequestOptions,
                    disableAutomaticIdGeneration,
                    finalRetryPolicyInstance,
                    scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and executes the Create request; synchronous failures during request
 * construction are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(
                requestRetryPolicy,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Create,
                clientContextOverride)
            .flatMap(serviceRequest ->
                create(serviceRequest, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a point-operation pipeline with end-to-end timeout semantics.
 * <p>
 * When an E2E latency policy is enabled, a negative timeout fails fast with a
 * dedicated cancellation exception, and otherwise the pipeline is bounded by
 * {@code Mono.timeout}; timeouts are translated into {@link OperationCancelledException}
 * carrying the most recent diagnostics.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Ensure at least one diagnostics instance exists so the exception can carry it.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }

        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    // No E2E policy configured/enabled: run the pipeline unbounded.
    return rxDocumentServiceResponseMono;
}
/**
 * Maps a reactor {@link TimeoutException} (E2E timeout fired) to an
 * {@link OperationCancelledException} enriched with diagnostics; any other
 * throwable is passed through unchanged.
 */
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if (unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());

        // Flag the in-flight request context as cancelled-on-timeout (hook is set when
        // the request was built; may legitimately be absent).
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }

        // Ensure at least one diagnostics instance exists so the exception can carry it.
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }

        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }

    return throwable;
}
/**
 * Builds the cancellation exception raised when a caller configures a negative
 * end-to-end timeout; tagged with the NEGATIVE_TIMEOUT_PROVIDED sub-status.
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    CosmosException cancellationException = new OperationCancelledException(
        String.format("Negative timeout '%s' provided.", negativeTimeout),
        null);
    BridgeInternal.setSubStatusCode(cancellationException, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(cancellationException, cosmosDiagnostics);
    }
    return cancellationException;
}
/**
 * Public Upsert entry point; wraps the core implementation with the
 * availability (multi-region hedging) strategy.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (effectiveOptions, effectiveE2EConfig, effectiveClientCtx) ->
            upsertDocumentCore(
                collectionLink, document, effectiveOptions, disableAutomaticIdGeneration,
                effectiveE2EConfig, effectiveClientCtx),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core Upsert path: layers the session-token reset retry policy (plus a
 * partition-key-mismatch retry policy when no PK was supplied), then wraps the
 * whole pipeline with the end-to-end timeout handling.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory collects diagnostics across retries of this one logical operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Retries after a cache refresh if the collection was recreated with a new PK definition.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and executes the Upsert request; synchronous failures during request
 * construction are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        // Upsert reuses the create-request builder with OperationType.Upsert.
        return getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride)
            .flatMap(serviceRequest ->
                upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Public Replace-by-link entry point; wraps the core implementation with the
 * availability (multi-region hedging) strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, effectiveE2EConfig, effectiveClientCtx) ->
            replaceDocumentCore(
                documentLink,
                document,
                effectiveOptions,
                effectiveE2EConfig,
                effectiveClientCtx),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core Replace-by-link path: layers the session-token reset retry policy (plus a
 * partition-key-mismatch retry policy when no PK was supplied), then wraps the
 * whole pipeline with the end-to-end timeout handling.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory collects diagnostics across retries of this one logical operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // The collection link is derived from the document link for cache refreshes.
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Validates the Replace arguments, converts the payload to a {@link Document}
 * and delegates to the typed replace implementation; synchronous failures are
 * surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the exception as the trailing SLF4J argument so the stack trace is
        // logged — consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Public Replace-by-instance entry point (self link taken from the document);
 * wraps the core implementation with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, effectiveE2EConfig, effectiveClientCtx) ->
            replaceDocumentCore(
                document,
                effectiveOptions,
                effectiveE2EConfig,
                effectiveClientCtx),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core Replace-by-instance path.
 * <p>
 * NOTE(review): unlike the link-based overload, this path does not normalize null
 * {@code options}, does not use a ScopedDiagnosticsFactory and does not apply the
 * E2E-timeout wrapper — a possibly-null {@code options} is passed straight into
 * PartitionKeyMismatchRetryPolicy. Verify whether this divergence is intentional.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // The document's self link identifies the collection for cache refreshes.
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the Replace-by-instance arguments and delegates to the link-based
 * replace implementation using the document's self link; synchronous failures
 * are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed copy-paste message (said "database" in a document-replace path) and
        // added the exception as the trailing SLF4J argument for the stack trace.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds and executes the Replace (PUT) request for a typed {@link Document}:
 * applies the tracking id, serializes the payload (recording serialization
 * diagnostics), wires request options and the retry policy, resolves the
 * collection, attaches partition-key information and dispatches the request.
 *
 * @param documentLink link of the document being replaced
 * @param document the replacement document; must not be null
 * @param options request options; may be null
 * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null
 * @param clientContextOverride diagnostics client context override for this call
 * @return a Mono emitting the typed resource response
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);

    Instant serializationStartTimeUTC = Instant.now();

    if (options != null) {
        String trackingId = options.getTrackingId();

        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }

    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook allows the E2E-timeout machinery to mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);

    // Use the request emitted by the partition-key pipeline instead of silently
    // ignoring the lambda parameter and closing over the outer variable.
    return requestObs
        .flatMap(req -> replace(req, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}
/** Per-request E2E latency config wins; otherwise fall back to the client-level default. */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/** A null override means "not configured" and resolves to the client-wide default. */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Public Patch entry point; wraps the core implementation with the
 * availability (multi-region hedging) strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, effectiveE2EConfig, effectiveClientCtx) ->
            patchDocumentCore(
                documentLink,
                cosmosPatchOperations,
                effectiveOptions,
                effectiveE2EConfig,
                effectiveClientCtx),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core Patch path: applies the session-token reset retry policy and wraps the
 * pipeline with the end-to-end timeout handling. (No partition-key-mismatch
 * policy here, unlike the create/upsert/replace cores.)
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory collects diagnostics across retries of this one logical operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}
    // Builds and issues a single Patch request: serializes the patch operations (recording
    // serialization diagnostics), creates the service request, wires the E2E-timeout hook,
    // resolves the collection and partition key, then executes the patch.
    private Mono<ResourceResponse<Document>> patchDocumentInternal(
        String documentLink,
        CosmosPatchOperations cosmosPatchOperations,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
        checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
        logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders =
            getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
        // Time the payload serialization so it shows up in the request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = ByteBuffer.wrap(
            PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            clientContextOverride,
            OperationType.Patch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);
        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Hook lets the E2E timeout machinery mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request,
            null,
            null,
            options,
            collectionObs);
        // NOTE(review): the lambda uses the outer 'request' rather than 'req' - this assumes
        // addPartitionKeyInformation mutates and emits the same request instance; confirm.
        return requestObs
            .flatMap(req -> patch(request, retryPolicyInstance))
            .map(resp -> toResourceResponse(resp, Document.class));
    }
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Delete,
(opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
documentLink,
null,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Delete,
(opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
documentLink,
internalObjectNode,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
    // Core delete pipeline: normalizes null options, scopes diagnostics, attaches the
    // session-token-reset retry policy and enforces the end-to-end timeout (if configured).
    private Mono<ResourceResponse<Document>> deleteDocumentCore(
        String documentLink,
        InternalObjectNode internalObjectNode,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {
        // Downstream code dereferences options freely, so substitute an empty instance for null.
        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            // Supplier form so the request is rebuilt on each retry.
            ObservableHelper.inlineIfPossibleAsObs(
                () -> deleteDocumentInternal(
                    documentLink,
                    internalObjectNode,
                    nonNullRequestOptions,
                    requestRetryPolicy,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }
    // Builds and issues a single Delete request: creates the service request, wires the
    // E2E-timeout hook, resolves the collection/partition key (optionally from the item
    // snapshot) and executes the delete. Synchronous failures are converted to Mono.error.
    private Mono<ResourceResponse<Document>> deleteDocumentInternal(
        String documentLink,
        InternalObjectNode internalObjectNode,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
            if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
                request.setNonIdempotentWriteRetriesEnabled(true);
            }
            if (options != null) {
                // Hook lets the E2E timeout machinery mark this request as cancelled-on-timeout.
                options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                    () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                request);
            // internalObjectNode (may be null) supplies the partition key when the options don't.
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
                request, null, internalObjectNode, options, collectionObs);
            return requestObs
                .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }
    // Deletes every document in a single logical partition.
    // NOTE(review): the 'partitionKey' parameter is not forwarded to the internal method -
    // presumably the partition key is already carried inside 'options' by the caller;
    // TODO confirm against the public-surface caller before relying on this.
    @Override
    public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }
    // Builds and issues the partition-level bulk delete request (ResourceType.PartitionKey).
    // Synchronous failures are converted to Mono.error.
    private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }
            logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            // The target partition key is expected to come from 'options' here.
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
            return requestObs.flatMap(req -> this
                .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }
    // Public read entry point; uses this client itself as the diagnostics factory.
    @Override
    public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
        return readDocument(documentLink, options, this);
    }
    // Read entry point with an injectable diagnostics factory (readMany point-reads pass
    // their scoped factory here). Reads never enable non-idempotent write retries.
    private Mono<ResourceResponse<Document>> readDocument(
        String documentLink,
        RequestOptions options,
        DiagnosticsClientContext innerDiagnosticsFactory) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Read,
            (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
            options,
            false,
            innerDiagnosticsFactory
        );
    }
    // Core read pipeline: normalizes null options, scopes diagnostics, attaches the
    // session-token-reset retry policy and enforces the end-to-end timeout (if configured).
    private Mono<ResourceResponse<Document>> readDocumentCore(
        String documentLink,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {
        // Downstream code dereferences options freely, so substitute an empty instance for null.
        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> readDocumentInternal(
                    documentLink,
                    nonNullRequestOptions,
                    retryPolicyInstance,
                    scopedDiagnosticsFactory),
                retryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }
private Mono<ResourceResponse<Document>> readDocumentInternal(
String documentLink,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Reading a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Read, ResourceType.Document, path, requestHeaders, options);
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req ->
this.read(request, retryPolicyInstance)
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in reading a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
    // "Read all documents" is implemented as a full-scan query over the collection.
    @Override
    public <T> Flux<FeedResponse<T>> readDocuments(
        String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
    }
    // Reads a batch of items identified by (id, partition key) pairs.
    // Strategy: resolve the collection and its routing map, group the identities by the
    // physical partition range that owns their effective partition key, then serve
    // single-item ranges with point reads and multi-item ranges with per-range queries.
    // All page results are merged into one synthetic FeedResponse with aggregated
    // request charge, query metrics and diagnostics.
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<CosmosItemIdentity> itemIdentityList,
        String collectionLink,
        QueryFeedOperationState state,
        Class<T> klass) {
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx)
        );
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request);
        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    return Mono.error(new IllegalStateException("Collection cannot be null"));
                }
                final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null);
                return valueHolderMono
                    .flatMap(collectionRoutingMapValueHolder -> {
                        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                        CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }
                        // Bucket every identity by the partition range owning its effective PK.
                        itemIdentityList
                            .forEach(itemIdentity -> {
                                // For multi-hash (hierarchical) PKs every path component must be supplied.
                                if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                    ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                        .getComponents().size() != pkDefinition.getPaths().size()) {
                                    throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                                }
                                String effectivePartitionKeyString = PartitionKeyInternalHelper
                                    .getEffectivePartitionKeyString(
                                        BridgeInternal.getPartitionKeyInternal(
                                            itemIdentity.getPartitionKey()),
                                        pkDefinition);
                                PartitionKeyRange range =
                                    routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                                if (partitionRangeItemKeyMap.get(range) == null) {
                                    List<CosmosItemIdentity> list = new ArrayList<>();
                                    list.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, list);
                                } else {
                                    List<CosmosItemIdentity> pairs =
                                        partitionRangeItemKeyMap.get(range);
                                    pairs.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, pairs);
                                }
                            });
                        // Ranges holding >1 item get a query; single-item ranges are point-read.
                        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
                        Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                            diagnosticsFactory,
                            partitionRangeItemKeyMap,
                            resourceLink,
                            state.getQueryOptions(),
                            klass);
                        Flux<FeedResponse<Document>> queries = queryForReadMany(
                            diagnosticsFactory,
                            resourceLink,
                            new SqlQuerySpec(DUMMY_SQL_QUERY),
                            state.getQueryOptions(),
                            Document.class,
                            ResourceType.Document,
                            collection,
                            Collections.unmodifiableMap(rangeQueryMap));
                        return Flux.merge(pointReads, queries)
                            .collectList()
                            .map(feedList -> {
                                // Aggregate all pages into one FeedResponse.
                                List<T> finalList = new ArrayList<>();
                                HashMap<String, String> headers = new HashMap<>();
                                ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                                Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                                double requestCharge = 0;
                                for (FeedResponse<Document> page : feedList) {
                                    ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                        ModelBridgeInternal.queryMetrics(page);
                                    if (pageQueryMetrics != null) {
                                        pageQueryMetrics.forEach(
                                            aggregatedQueryMetrics::putIfAbsent);
                                    }
                                    requestCharge += page.getRequestCharge();
                                    finalList.addAll(page.getResults().stream().map(document ->
                                        ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                    aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                                }
                                CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                    aggregatedDiagnostics, aggregateRequestStatistics);
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    // Record the overall operation as a 200 success.
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        200,
                                        0,
                                        finalList.size(),
                                        requestCharge,
                                        aggregatedDiagnostics,
                                        null
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            aggregatedDiagnostics,
                                            ctx);
                                }
                                headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                    .toString(requestCharge));
                                FeedResponse<T> frp = BridgeInternal
                                    .createFeedResponseWithQueryMetrics(
                                        finalList,
                                        headers,
                                        aggregatedQueryMetrics,
                                        null,
                                        false,
                                        false,
                                        aggregatedDiagnostics);
                                return frp;
                            });
                    })
                    // On CosmosException, still record the failed operation into diagnostics.
                    .onErrorMap(throwable -> {
                        if (throwable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException)throwable;
                            CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                            if (diagnostics != null) {
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        cosmosException.getStatusCode(),
                                        cosmosException.getSubStatusCode(),
                                        0,
                                        cosmosException.getRequestCharge(),
                                        diagnostics,
                                        throwable
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            diagnostics,
                                            state.getDiagnosticsContextSnapshot());
                                }
                            }
                            return cosmosException;
                        }
                        return throwable;
                    });
            }
        );
    }
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
String partitionKeySelector = createPkSelector(partitionKeyDefinition);
for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {
SqlQuerySpec sqlQuerySpec;
List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
if (cosmosItemIdentityList.size() > 1) {
if (partitionKeySelector.equals("[\"id\"]")) {
sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList, partitionKeySelector);
} else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
sqlQuerySpec = createReadManyQuerySpecMultiHash(entry.getValue(), partitionKeyDefinition);
} else {
sqlQuerySpec = createReadManyQuerySpec(cosmosItemIdentityList, partitionKeySelector);
}
rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
}
}
return rangeQueryMap;
}
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
List<CosmosItemIdentity> idPartitionKeyPairList,
String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);
String idValue = itemIdentity.getId();
String idParamName = "@param" + i;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append(idParamName);
if (i < idPartitionKeyPairList.size() - 1) {
queryStringBuilder.append(", ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkParamName = "@param" + (2 * i);
parameters.add(new SqlParameter(pkParamName, pkValue));
String idValue = itemIdentity.getId();
String idParamName = "@param" + (2 * i + 1);
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
    // Builds the readMany query for hierarchical (multi-hash) partition keys:
    //   SELECT * FROM c WHERE ( (c.id = @pN AND c.<path1> = @p0 AND c.<path2> = @p1 ...) OR ... )
    // NOTE(review): the pk value is assumed to be a single String whose components are
    // separated by '=' (see split below) - confirm this encoding against the caller.
    private SqlQuerySpec createReadManyQuerySpecMultiHash(
        List<CosmosItemIdentity> itemIdentities,
        PartitionKeyDefinition partitionKeyDefinition) {
        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();
        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        // paramCount numbers parameters globally across all items and pk components.
        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);
            PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
            Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
            String pkValueString = (String) pkValue;
            // Each entry pairs a pk path (e.g. "/tenantId") with its parameter name.
            List<List<String>> partitionKeyParams = new ArrayList<>();
            List<String> paths = partitionKeyDefinition.getPaths();
            int pathCount = 0;
            for (String subPartitionKey: pkValueString.split("=")) {
                String pkParamName = "@param" + paramCount;
                partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, subPartitionKey));
                paramCount++;
                pathCount++;
            }
            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c.");
                // substring(1) strips the leading '/' from the pk path.
                queryStringBuilder.append(pkParam.get(0).substring(1));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");
            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");
        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
.map(pathPart -> StringUtils.substring(pathPart, 1))
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
    // Executes the per-partition-range queries produced for readMany. Returns an empty
    // Flux when no range needs a query. Applies the end-to-end timeout to the combined
    // feed when an E2E policy is enabled.
    private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }
        UUID activityId = randomUuid();
        // Shared cancellation flag the E2E timeout machinery flips when the query times out.
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
                diagnosticsFactory,
                queryClient,
                collection.getResourceId(),
                sqlQuery,
                rangeQueryMap,
                options,
                collection.getResourceId(),
                parentResourceLink,
                activityId,
                klass,
                resourceTypeEnum,
                isQueryCancelledOnTimeout);
        Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsFactory);
        }
        return feedResponseFlux;
    }
    // Serves the single-item partition ranges of a readMany call with point reads,
    // then adapts each point-read result into a single-item FeedResponse so it can be
    // merged with the query results. A 404 with UNKNOWN sub-status is treated as
    // "item absent" (empty page with the error's headers/diagnostics), not a failure.
    private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
        String resourceLink,
        CosmosQueryRequestOptions queryRequestOptions,
        Class<T> klass) {
        return Flux.fromIterable(singleItemPartitionRequestMap.values())
            .flatMap(cosmosItemIdentityList -> {
                // Only ranges with exactly one item are point-read; others are queried elsewhere.
                if (cosmosItemIdentityList.size() == 1) {
                    CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                    RequestOptions requestOptions = ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .toRequestOptions(queryRequestOptions);
                    requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                    return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                        .flatMap(resourceResponse -> Mono.just(
                            new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                        ))
                        .onErrorResume(throwable -> {
                            Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                            if (unwrappedThrowable instanceof CosmosException) {
                                CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                                int statusCode = cosmosException.getStatusCode();
                                int subStatusCode = cosmosException.getSubStatusCode();
                                // Plain NotFound means the item simply doesn't exist - not an error for readMany.
                                if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                    return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                                }
                            }
                            return Mono.error(unwrappedThrowable);
                        });
                }
                return Mono.empty();
            })
            .flatMap(resourceResponseToExceptionPair -> {
                ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
                CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
                FeedResponse<Document> feedResponse;
                if (cosmosException != null) {
                    // Item absent: emit an empty page carrying the 404's headers and diagnostics.
                    feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
                } else {
                    CosmosItemResponse<T> cosmosItemResponse =
                        ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                    feedResponse = ModelBridgeInternal.createFeedResponse(
                        Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                        cosmosItemResponse.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
                }
                return Mono.just(feedResponse);
            });
    }
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}
    // Adapts this client into the IDocumentQueryClient interface used by the query
    // execution pipeline. When an operation listener tuple is supplied, query execution
    // is instrumented with request/response/exception callbacks.
    private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return new IDocumentQueryClient () {
            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }
            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }
            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }
            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }
            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                return RxDocumentClientImpl.this.consistencyLevel;
            }
            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                if (operationContextAndListenerTuple == null) {
                    return RxDocumentClientImpl.this.query(request).single();
                } else {
                    // Instrumented path: propagate the correlated activity id and notify
                    // the listener on request, response and error.
                    final OperationListener listener =
                        operationContextAndListenerTuple.getOperationListener();
                    final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                    request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                    listener.requestListener(operationContext, request);
                    return RxDocumentClientImpl.this.query(request).single().doOnNext(
                        response -> listener.responseListener(operationContext, response)
                    ).doOnError(
                        ex -> listener.exceptionListener(operationContext, ex)
                    );
                }
            }
            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }
            @Override
            public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
                ResourceType resourceType,
                OperationType operationType,
                Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
                RxDocumentServiceRequest req,
                BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
                return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                    resourceType,
                    operationType,
                    retryPolicyFactory,
                    req,
                    feedOperation
                );
            }
            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                // Not used by the query pipeline for this adapter.
                return null;
            }
        };
    }
    // Main query entry point: logs the query text (subject to the logger's policy)
    // and hands off to the generic query pipeline.
    @Override
    public <T> Flux<FeedResponse<T>> queryDocuments(
        String collectionLink,
        SqlQuerySpec querySpec,
        QueryFeedOperationState state,
        Class<T> classOfT) {
        SqlQuerySpecLogger.getInstance().logQuery(querySpec);
        return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
    }
    // Starts a change-feed query against the given (already-resolved) collection.
    @Override
    public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
        final DocumentCollection collection,
        final CosmosChangeFeedRequestOptions changeFeedOptions,
        Class<T> classOfT) {
        checkNotNull(collection, "Argument 'collection' must not be null.");
        ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
            this,
            ResourceType.Document,
            classOfT,
            collection.getAltLink(),
            collection.getResourceId(),
            changeFeedOptions);
        return changeFeedQueryImpl.executeAsync();
    }
    // PagedFlux adapter: extracts the change-feed options from the operation state
    // and delegates to the options-based overload.
    @Override
    public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
        return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
    }
// Reads all documents belonging to a single logical partition. The read-feed is
// implemented as a query: a SELECT built from the collection's partition-key selector,
// pinned to the one physical partition-key range that owns the partition key's
// effective partition key (EPK). Diagnostics are funneled through a
// ScopedDiagnosticsFactory and merged back into the caller-visible context.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the caller's query options so later mutations (e.g. pinning the PK range id)
// do not leak back to the caller.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
// Regions eligible for speculative (availability-strategy) execution; a list of
// size >= 2 means cross-region hedging may occur.
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
// Single applicable region: no per-attempt reset needed, only final merge.
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx));
} else {
// Hedged execution: reset captured diagnostics between attempts.
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
// Resolve the collection first: its partition-key definition and RID are needed
// to build the selector query and to look up the routing map.
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// SELECT scanning exactly this logical partition.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries the lookup/query when the physical partition was split/merged (gone).
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Map the partition key to its EPK and then to the single owning range.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
// Pin the query to that range so it is not fanned out.
return createQueryInternal(
diagnosticsFactory,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Hedged execution: merge diagnostics on every page, error, and cancellation.
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
/** Returns the shared query-plan cache keyed by query text. */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return this.queryPlanCache;
}
/** Reads the partition-key-range feed of a collection (state-based overload). */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/** Reads the partition-key-range feed of a collection (options-based overload). */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds a service request targeting the stored-procedures feed of a collection.
 * Validates arguments and the resource before constructing the request.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Builds a service request targeting the user-defined-functions feed of a collection.
 * Validates arguments and the resource before constructing the request.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/** Creates a stored procedure, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a stored procedure, mapping the wire
// response to a typed ResourceResponse. Synchronous validation failures are
// surfaced as Mono.error rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Replaces a stored procedure, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Replace request for a stored procedure addressed by its
// self-link. Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a stored procedure by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Delete request for a stored procedure addressed by link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a stored procedure by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Read request for a stored procedure addressed by link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the stored-procedure feed of a collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/** String-query convenience overload; delegates to the {@code SqlQuerySpec} variant. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, state);
}
/** Queries stored procedures in a collection via the shared query-creation path. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure with the given parameters, applying the session-token-reset retry policy. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/** Executes a transactional batch request, applying the session-token-reset retry policy. */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Builds and issues the ExecuteJavaScript request for a stored procedure. The
// serialized parameter list becomes the request body; the session token from the
// response is captured before the response is mapped.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda sends the outer 'request' rather than 'req';
// addPartitionKeyInformation appears to enrich the same instance — confirm.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Builds and issues a transactional batch request and parses the service response
// into a CosmosBatchResponse. Synchronous failures are surfaced as Mono.error.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
/** Creates a trigger under the given collection, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a trigger, mapping the wire response to a
// typed ResourceResponse. Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds a service request targeting the triggers feed of a collection.
 * Validates arguments and the resource before constructing the request.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
/** Replaces a trigger, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Replace request for a trigger addressed by its self-link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a trigger by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Delete request for a trigger addressed by link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a trigger by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Read request for a trigger addressed by link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the trigger feed of a collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/** String-query convenience overload; delegates to the {@code SqlQuerySpec} variant. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, state);
}
/** Queries triggers in a collection via the shared query-creation path. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/** Creates a user-defined function, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a user-defined function, mapping the wire
// response to a typed ResourceResponse. Synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Replaces a user-defined function, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Replace request for a user-defined function addressed by its
// self-link. Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a user-defined function by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Delete request for a user-defined function addressed by link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a user-defined function by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Read request for a user-defined function addressed by link.
// Synchronous validation failures are surfaced as Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Give the retry policy a chance to observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the user-defined-function feed of a collection. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/** String-query convenience overload; delegates to the {@code SqlQuerySpec} variant. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, state);
}
/** Queries user-defined functions in a collection via the shared query-creation path. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/** Reads a conflict by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Read request for a conflict addressed by link. Partition-key
// information is resolved asynchronously before the request is sent.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda sends the outer 'request' rather than 'req';
// addPartitionKeyInformation appears to enrich the same instance — confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the conflict feed of a collection. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
/** String-query convenience overload; delegates to the {@code SqlQuerySpec} variant. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, state);
}
/** Queries conflicts in a collection via the shared query-creation path. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/** Deletes a conflict by link, retrying via the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a Conflict resource and maps the raw store response into a typed
// ResourceResponse<Conflict>. Synchronous failures (validation, request building)
// are surfaced via Mono.error so callers always observe errors reactively.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        // Conflicts are partitioned: resolve/attach partition-key info before sending.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                // NOTE(review): the hook and the delete call use the outer 'request', not the
                // emitted 'req' — presumably the same instance; confirm
                // addPartitionKeyInformation never substitutes a different request object.
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Creates a User under the given database, wiring one retry-policy instance
    // through both the operation and its retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Builds and executes the Create-User request; maps the response to ResourceResponse<User>.
// NOTE(review): user.getId() is dereferenced in the log line before getUserRequest's
// null-check runs, so a null user yields an NPE (wrapped in Mono.error) rather than
// the intended IllegalArgumentException("user") — confirm whether that is acceptable.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Upserts a User under the given database; the same retry-policy instance both
    // observes the outgoing request and drives retries.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Builds and executes the Upsert-User request; the retry policy is notified of the
// request before dispatch so it can capture per-request state.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and assembles an RxDocumentServiceRequest targeting the
 * users feed of the given database for the requested operation.
 *
 * @throws IllegalArgumentException when databaseLink is empty or user is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    // Argument checks first — link before resource, matching sibling builders.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    final String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Replaces an existing User; one retry-policy instance serves both roles.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
// Replaces a User addressed by its self-link; synchronous failures become Mono.error.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        // Replace targets the resource's own self-link, not a feed path.
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Deletes the User addressed by userLink.
// NOTE(review): every sibling public operation here carries @Override; confirm
// deleteUser is declared on the client interface and add @Override for consistency.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Delete-User request; maps the store response to
// ResourceResponse<User>. Validation errors are returned via Mono.error.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Reads the User addressed by userLink.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Read-User request; maps the response to ResourceResponse<User>.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    // Reads the users feed of a database as paged FeedResponses.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    // Convenience overload: wraps the raw query text and delegates.
    return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    // Executes a SQL query against the users feed of the database.
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    // Reads the client encryption key addressed by clientEncryptionKeyLink.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Read request for a client encryption key.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    // Creates a client encryption key under the given database.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Create request for a client encryption key.
// NOTE(review): clientEncryptionKey.getId() is dereferenced in the log line before
// the null-check in getClientEncryptionKeyRequest, so a null key surfaces as an NPE
// (via Mono.error) instead of IllegalArgumentException — confirm intent.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and assembles an RxDocumentServiceRequest targeting the
 * client-encryption-keys feed of the given database.
 *
 * @throws IllegalArgumentException when databaseLink is empty or clientEncryptionKey is null
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    // Argument checks first — link before resource, matching sibling builders.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    final String resourcePath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    // Replaces an existing client encryption key addressed by its name-based link.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
        nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Replace request for a client encryption key; the target
// path comes from the caller-supplied name-based link rather than the resource's self-link.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    // Reads the client-encryption-keys feed of a database as paged FeedResponses.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
        Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // Executes a SQL query against the client-encryption-keys feed.
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Creates a Permission under the given user.
    // BUGFIX: previously a SECOND, unrelated retry-policy instance was passed to
    // inlineIfPossibleAsObs while the request itself used documentClientRetryPolicy,
    // so retry decisions were made by a policy that never observed the request
    // (onBeforeSendRequest ran on the other instance). Pass the same instance for
    // both roles, matching every sibling create/upsert/delete method in this class.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Builds and executes the Create-Permission request.
// NOTE(review): permission.getId() is dereferenced in the log line before the
// null-check in getPermissionRequest — a null permission surfaces as an NPE.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Upserts a Permission under the given user; a single retry-policy instance
    // drives both request execution and retry decisions.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Builds and executes the Upsert-Permission request; the retry policy is notified
// of the request before dispatch.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Validates the arguments and assembles a request targeting the permissions feed of
// the given user for the requested operation.
// @throws IllegalArgumentException when userLink is empty or permission is null
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Replaces an existing Permission.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
// Replaces a Permission addressed by its self-link; synchronous failures become Mono.error.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Deletes the Permission addressed by permissionLink.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Delete-Permission request.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Reads the Permission addressed by permissionLink.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and executes the Read-Permission request.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    // Reads the permissions feed of a user as paged FeedResponses.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
        Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    // Convenience overload: wraps the raw query text and delegates.
    return queryPermissions(userLink, new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       QueryFeedOperationState state) {
    // Executes a SQL query against the permissions feed of the user.
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Replaces an Offer (throughput settings resource).
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Replaces an Offer addressed by its self-link. Note: unlike sibling internals this
// passes null headers/options and never calls onBeforeSendRequest.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Reads the Offer addressed by offerLink; one retry-policy instance serves both
    // request execution and retry driving.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Builds and executes the Read-Offer request; headers are null since offers take
// no per-request options. The HashMap cast disambiguates the create(...) overload.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    // Reads the account-level offers feed as paged FeedResponses.
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Reads a non-document feed, unwrapping the query options from the operation state
 * and delegating to the options-based overload.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {

    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
// Reads a non-document feed with a fresh retry policy wrapped around the paginated
// internal implementation.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
        retryPolicy);
}
// Core pagination loop for ReadFeed on non-document resources: builds one request
// per page (propagating the continuation token), executes it, and converts each raw
// response into a typed FeedResponse page.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {

    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 signals "no explicit page size" to the paginator when the caller set none.
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    // Document feeds go through a different (query) pipeline; this path is feed-only.
    assert(resourceType != ResourceType.Document);

    // Per-page request factory: carries the continuation token of the previous page
    // and notifies the retry policy of each outgoing request.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };

    // Executes one page request and maps the raw feed response to typed items,
    // honoring any custom item-factory installed on the query options.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));

    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Convenience overload: wraps the raw query text and delegates.
    return queryOffers(new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Offers are account-scoped, hence the null parent link.
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Reads the account's DatabaseAccount metadata resource.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Builds and executes the Read request for the account root ("" path); the HashMap
// cast disambiguates the create(...) overload.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Returns the session-token container (typed as Object for interface compatibility).
public Object getSession() {
    return this.sessionContainer;
}

// Replaces the session-token container; the caller must supply a SessionContainer
// (the cast throws ClassCastException otherwise).
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}

@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}

@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}

@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}

@Override
public AddressSelector getAddressSelector() {
    // A fresh selector per call, bound to the configured transport protocol.
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads DatabaseAccount metadata from a SPECIFIC endpoint (used for per-region
// probing). Deferred so each subscription builds a fresh request. As a side effect,
// refreshes the multi-write flag from the returned account properties.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Force the request to the caller-specified region endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    // Multi-write is effective only when BOTH the client policy and the
                    // account enable it.
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Selects the store model (direct TCP vs gateway) for a request.
 * <p>
 * Certain requests must be routed through the gateway even when the client's
 * connectivity mode is direct: metadata resources (offers, client encryption keys,
 * partition key ranges), scripts (except ExecuteJavaScript), most control-plane
 * create/delete/replace/read operations, and cross-partition queries that carry
 * neither a partition key nor a partition-key-range identity.
 *
 * @param request the request to route
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request override always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }

    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();

    // Metadata/system resources are gateway-only. Note operator precedence: the
    // script clause is (isScript() && operationType != ExecuteJavaScript), OR'ed
    // with the other resource-type checks.
    if (resourceType == ResourceType.Offer ||
        resourceType == ResourceType.ClientEncryptionKey ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }

    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        // Control-plane creates go through the gateway; data-plane creates go direct.
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Queries/feeds on collection children without partition targeting info
        // cannot be routed directly — let the gateway fan out.
        if ((operationType == OperationType.Query ||
            operationType == OperationType.SqlQuery ||
            operationType == OperationType.ReadFeed) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }

        return this.storeModel;
    }
}
@Override
public void close() {
    // Idempotent shutdown: the closed flag's getAndSet guarantees the teardown
    // sequence runs at most once even under concurrent close() calls.
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");

        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);

        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);

        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);

        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);

        // Throughput control is lazily initialized; only close if it was enabled.
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }

        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
@Override
public ItemDeserializer getItemDeserializer() {
    // Returns the client-wide item deserializer instance.
    return this.itemDeserializer;
}
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    // Lazily create the store on first use and wire it into the transport layer exactly once.
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        ConnectionMode connectionMode = this.connectionPolicy.getConnectionMode();
        this.throughputControlStore = new ThroughputControlStore(
            this.collectionCache,
            connectionMode,
            this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == connectionMode) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    // Delegates proactive connection warm-up and cache initialization to the store model.
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    // The account-level default consistency comes from the gateway service configuration.
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    boolean isDirectMode = this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT;
    // In direct mode both the store model and the address resolver participate.
    if (isDirectMode) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    // The gateway proxy is configured in all connection modes.
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    // Forwards the completion signal of proactive connection warm-up to the store model.
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    // Forwards the start signal of proactive connection warm-up to the store model.
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
@Override
public String getMasterKeyOrResourceToken() {
    // Returns the credential string this client was constructed with (master key or resource token).
    return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans all documents of one logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 *
 * @param partitionKey the partition key value to scan.
 * @param partitionKeySelector the property path selector appended after the root alias.
 * @return the parameterized {@link SqlQuerySpec}.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
    // The query/document request is only used to resolve the collection metadata.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    // Retries transparently when the cached collection identity turns out to be stale.
    InvalidPartitionExceptionRetryPolicy retryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    retryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink, forceRefresh),
        retryPolicy);
}
/**
 * Resolves the collection for {@code collectionLink} and maps its overlapping partition
 * key ranges (covering the full partition key space) into feed ranges.
 *
 * @param request the service request used for collection resolution and diagnostics.
 * @param collectionLink the collection link; must be non-empty.
 * @param forceRefresh whether to bypass the partition key range cache.
 * @return a {@link Mono} emitting the feed ranges of the collection.
 */
private Mono<List<FeedRange>> getFeedRangesInternal(
    RxDocumentServiceRequest request,
    String collectionLink,
    boolean forceRefresh) {
    logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return collectionCache
        .resolveCollectionAsync(null, request)
        .flatMap(collectionValueHolder -> {
            final DocumentCollection collection = collectionValueHolder.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }
            return partitionKeyRangeCache
                .tryGetOverlappingRangesAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
                    forceRefresh,
                    null)
                .map(pkRangesValueHolder -> toFeedRanges(pkRangesValueHolder, request));
        });
}
/**
 * Converts a value holder of partition key ranges into feed ranges. A null payload means
 * the cached name mapping is stale: the request is flagged for a name-cache refresh and
 * an {@link InvalidPartitionException} is thrown so the retry policy can re-resolve.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> pkRanges = partitionKeyRangeListValueHolder.v;
    if (pkRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>(pkRanges.size());
    for (PartitionKeyRange pkRange : pkRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range into a feed range implementation.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    return randomUuid(random.nextLong(), random.nextLong());
}
/**
 * Builds a version-4 UUID from the given raw 128 bits by forcing the RFC 4122
 * version and variant fields onto them.
 *
 * @param msb raw most-significant 64 bits; the version nibble is overwritten with 4.
 * @param lsb raw least-significant 64 bits; the two variant bits are overwritten with binary 10.
 * @return the resulting {@link UUID}.
 */
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble, then stamp version 4 (random).
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the two variant bits, then stamp the IETF variant (binary 10).
    long variantedLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantedLsb);
}
// Convenience overload that uses this client itself as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
/**
 * Executes a document point operation, optionally hedging it across multiple regions when
 * a threshold-based availability strategy applies (see getApplicableRegionsForSpeculation).
 * The first attempt runs immediately; each subsequent regional attempt is delayed by the
 * strategy's threshold plus step and excludes all other applicable regions. The first
 * non-transient result (success or non-transient error) wins via Mono.firstWithValue.
 *
 * NOTE(review): assumes the callback honors the excluded-regions request option for
 * regional routing - confirm against the callers that supply the callback.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");
    final RequestOptions nonNullRequestOptions =
        initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
    checkArgument(
        resourceType == ResourceType.Document,
        "This method can only be used for document point operations.");
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        idempotentWriteRetriesEnabled,
        nonNullRequestOptions);
    // Fewer than two applicable regions: no hedging, execute the callback directly.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
    // Collects diagnostics from all hedged attempts so they can be merged once at the end.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
            if (monoList.isEmpty()) {
                // First attempt: not region-pinned, and any CosmosException (transient or
                // not) is materialized as a value so it can win the race.
                Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                            .map(NonTransientPointOperationResult::new)
                            .onErrorResume(
                                RxDocumentClientImpl::isCosmosException,
                                t -> Mono.just(
                                    new NonTransientPointOperationResult(
                                        Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin to this region by excluding all other applicable regions.
                clonedOptions.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        nonNullRequestOptions.getExcludeRegions(),
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                // Only non-transient errors are materialized here; transient errors keep the
                // mono empty so another attempt can still win.
                Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                            .map(NonTransientPointOperationResult::new)
                            .onErrorResume(
                                RxDocumentClientImpl::isNonTransientCosmosException,
                                t -> Mono.just(
                                    new NonTransientPointOperationResult(
                                        Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Delay grows with each additional hedged region: threshold + step * (n - 1).
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    // The first emitted value wins; merge diagnostics exactly once on every outcome path.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            diagnosticsFactory.merge(nonNullRequestOptions);
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);
            // firstWithValue signals NoSuchElementException when all sources complete
            // empty/error; surface the first inner CosmosException instead.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                        diagnosticsFactory.merge(nonNullRequestOptions);
                        return cosmosException;
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            diagnosticsFactory.merge(nonNullRequestOptions);
            return exception;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// Returns true when the unwrapped (reactor-decomposed) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// Returns true when the unwrapped throwable is a CosmosException whose status/sub-status
// marks it as non-transient for hedging (see isNonTransientResultForHedging).
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
/**
 * Builds the excluded-regions list that pins a hedged attempt to {@code currentRegion}:
 * the caller's original exclusions plus every other applicable region.
 *
 * @param initialExcludedRegions the caller-supplied exclusions (may be null).
 * @param applicableRegions all regions participating in hedging.
 * @param currentRegion the region this attempt should be routed to.
 * @return a new mutable list of regions to exclude.
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> effectiveExcludedRegions = new ArrayList<>();
    if (initialExcludedRegions != null) {
        effectiveExcludedRegions.addAll(initialExcludedRegions);
    }
    applicableRegions
        .stream()
        .filter(region -> !region.equals(currentRegion))
        .forEach(effectiveExcludedRegions::add);
    return effectiveExcludedRegions;
}
/**
 * Decides whether a status/sub-status pair represents a terminal (non-transient) outcome
 * for hedging purposes: any success (&lt; 400), client-side operation timeouts (408 with
 * the client-operation-timeout sub-status), a fixed set of 4xx codes, and 404 with the
 * UNKNOWN sub-status. Everything else is considered transient and may be retried in
 * another region.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Anything below 400 is a success (or redirect) and therefore final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // 408 is only terminal when it is the client-side end-to-end operation timeout.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT) {
        return subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
    }
    // 404 is only terminal for the plain (UNKNOWN sub-status) not-found case.
    if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        return subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
    }
    // These client errors are terminal regardless of sub-status.
    return statusCode == HttpConstants.StatusCodes.BADREQUEST
        || statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
        || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
        || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
        || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
}
// Uses the supplied diagnostics context override when present, otherwise this client itself.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any
 * @param operationType the operation type
 * @param excludedRegions regions to exclude from consideration (may be null)
 * @return the applicable endpoints ordered by preference list if any
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    }
    if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }
    // Operations that are neither read-only nor write get no applicable endpoints.
    return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes null entries from the given endpoint list in place and returns the same list
 * instance; a null list yields the shared empty list.
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload that extracts the excluded regions from the request options.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {
    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
/**
 * Computes the ordered list of regions eligible for speculative (hedged) execution.
 * Returns an empty list (meaning "no hedging") unless: the end-to-end policy is enabled
 * with a threshold-based availability strategy, the resource is a document, and - for
 * writes - idempotent write retries and multi-write locations are both available.
 * Excluded regions are filtered out case-insensitively.
 */
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {
    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return EMPTY_REGION_LIST;
    }
    if (resourceType != ResourceType.Document) {
        return EMPTY_REGION_LIST;
    }
    // Writes only hedge when retries are safe (idempotent) and multiple write regions exist.
    if (operationType.isWriteOperation()
        && (!isIdempotentWriteRetriesEnabled
            || !this.globalEndpointManager.canUseMultipleWriteLocations())) {
        return EMPTY_REGION_LIST;
    }
    if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
        return EMPTY_REGION_LIST;
    }
    List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
    HashSet<String> normalizedExcludedRegions = new HashSet<>();
    if (excludedRegions != null) {
        for (String excludedRegion : excludedRegions) {
            normalizedExcludedRegions.add(excludedRegion.toLowerCase(Locale.ROOT));
        }
    }
    List<String> orderedRegionsForSpeculation = new ArrayList<>();
    for (URI endpoint : endpoints) {
        String regionName = this.globalEndpointManager.getRegionName(endpoint, operationType);
        if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
            orderedRegionsForSpeculation.add(regionName);
        }
    }
    return orderedRegionsForSpeculation;
}
/**
 * Executes a feed/query operation, optionally hedging it across multiple regions when a
 * threshold-based availability strategy applies. Mirrors the point-operation variant:
 * the first attempt runs immediately, later attempts are delayed per the strategy and
 * pinned to a single region via excluded regions; the first non-transient result wins.
 * Writes never hedge here (isIdempotentWriteRetriesEnabled is passed as false).
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
    final ResourceType resourceType,
    final OperationType operationType,
    final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
    final RxDocumentServiceRequest req,
    final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
    checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
    checkNotNull(req, "Argument 'req' must not be null.");
    assert(resourceType == ResourceType.Document);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        this.getEffectiveEndToEndOperationLatencyPolicyConfig(
            req.requestContext.getEndToEndOperationLatencyPolicyConfig());
    List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
    List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        false,
        initialExcludedRegions
    );
    // Fewer than two applicable regions: run the feed operation without hedging.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return feedOperation.apply(retryPolicyFactory, req);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            // Each attempt works on its own clone so per-region mutations don't leak.
            RxDocumentServiceRequest clonedRequest = req.clone();
            if (monoList.isEmpty()) {
                // First attempt: any CosmosException is materialized so it can win the race.
                Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                                 .map(NonTransientFeedOperationResult::new)
                                 .onErrorResume(
                                     RxDocumentClientImpl::isCosmosException,
                                     t -> Mono.just(
                                         new NonTransientFeedOperationResult<>(
                                             Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin to this region by excluding all other applicable regions.
                clonedRequest.requestContext.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        initialExcludedRegions,
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                // Only non-transient errors are materialized; transient ones stay empty so
                // another attempt can still win.
                Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                                 .map(NonTransientFeedOperationResult::new)
                                 .onErrorResume(
                                     RxDocumentClientImpl::isNonTransientCosmosException,
                                     t -> Mono.just(
                                         new NonTransientFeedOperationResult<>(
                                             Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Delay grows with each additional hedged region: threshold + step * (n - 1).
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);
            // firstWithValue signals NoSuchElementException when all sources complete
            // empty/error; surface the first inner CosmosException instead.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        return Utils.as(innerException, CosmosException.class);
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            return exception;
        });
}
// Callback abstraction for a single document point operation, parameterized by the
// request options, the effective end-to-end latency policy, and the diagnostics factory
// to create diagnostics against (used so hedged attempts share one scoped factory).
@FunctionalInterface
private interface DocumentPointOperation {
    Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Terminal outcome of one regional attempt of a point operation - exactly one of
 * {@code response} or {@code exception} is set. Materializing non-transient errors as
 * values lets hedged attempts race via {@code Mono.firstWithValue}.
 */
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    // True when this result carries a non-transient error instead of a response.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
/**
 * Terminal outcome of one regional attempt of a feed/query operation - exactly one of
 * {@code response} or {@code exception} is set. Generic counterpart of
 * {@code NonTransientPointOperationResult} used for hedged feed operations.
 */
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    // True when this result carries a non-transient error instead of a response.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
/**
 * A {@link DiagnosticsClientContext} decorator that records every CosmosDiagnostics it
 * creates so that, when a (possibly hedged) operation finishes, all attempts' diagnostics
 * can be merged into a single CosmosDiagnosticsContext. Merging is one-shot: only the
 * first merge call has an effect until reset() is invoked.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Ensures the collected diagnostics are merged at most once per scope.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    // The real factory all calls are delegated to.
    private final DiagnosticsClientContext inner;
    // Every diagnostics instance handed out by this factory, in creation order.
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    // When true, merged feed diagnostics are also flagged as captured in the paged flux.
    private final boolean shouldCaptureAllFeedDiagnostics;
    private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }
    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }
    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Delegate creation, but remember the instance for the eventual merge.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        mostRecentlyCreatedDiagnostics.set(diagnostics);
        return diagnostics;
    }
    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }
    @Override
    public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
        return this.mostRecentlyCreatedDiagnostics.get();
    }
    // Merges using the diagnostics context snapshot from the request options when available.
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                knownCtx = requestOptions.getDiagnosticsContextSnapshot();
            }
        }
        merge(knownCtx);
    }
    // Attaches all collected diagnostics (those not yet bound to a context) to a single
    // target context. No-op on every call after the first successful one.
    public void merge(CosmosDiagnosticsContext knownCtx) {
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // Fall back to the first context found among the collected diagnostics.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        // Without any target context there is nothing to merge into.
        if (ctx == null) {
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }
    // Clears collected diagnostics and re-arms the one-shot merge latch.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor used when authorization is driven by a permission feed
 * (resource tokens). Delegates the core wiring to the designated constructor, then
 * indexes each permission's (partition key, token) pair by the resource id / full
 * name parsed from the permission's resource link.
 *
 * @throws IllegalArgumentException if a permission has an empty or unparseable
 *         resource link, or if the feed yields no usable tokens
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);

    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                    Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }

            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }

            // A single resource may carry multiple (partition key, token) pairs;
            // group them under the parsed resource id / full name.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());

            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }

        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }

        // Remember the first genuine resource token so it can serve as a fallback
        // for requests that cannot be matched against the map.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Designated constructor: wires diagnostics config, authorization, connection policy,
 * session container, HTTP client, endpoint manager and retry policy. All other
 * constructors delegate here. On any runtime failure the partially constructed client
 * is closed before the exception is rethrown.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Client bookkeeping: global active-client counter, per-client id and a
// correlation id defaulting to the zero-padded client id.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
// Diagnostics client config mirrors the effective settings for telemetry output.
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
// NOTE(review): the message has 3 placeholders but 5 arguments are passed;
// configs.getProtocol() is never rendered — verify against upstream.
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Authorization selection order: explicit key credential, then resource token,
// then master key (wrapped as a key credential), then AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): the AAD scope string below appears truncated in this copy
// (unterminated literal after the scheme) — verify against upstream source.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
// Fall back to the default direct-connection policy when none was supplied.
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session capturing is only active for SESSION consistency unless overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Release any partially acquired resources before propagating.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Exposes the diagnostics-oriented view of this client's configuration.
 *
 * @return the {@link DiagnosticsClientConfig} assembled during construction
 */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/**
 * Creates a new {@link CosmosDiagnostics} instance using the sampling rate from the
 * client telemetry config, and remembers it as the most recently created instance so
 * it can later be attached to failures that never produced a response.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    final CosmosDiagnostics created = diagnosticsAccessor.create(
        this,
        telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    this.mostRecentlyCreatedDiagnostics.set(created);
    return created;
}
/**
 * Creates the gateway service configuration reader and validates that the database
 * account could be resolved; fails fast with a descriptive error otherwise. Also
 * derives whether multi-write locations are effectively usable.
 *
 * NOTE(review): the "More info" URLs in the messages below appear truncated after
 * "https:" in this copy — verify the full literals against upstream source.
 */
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
// Prefer surfacing the underlying refresh error as the cause when one was recorded.
Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
if (databaseRefreshErrorSnapshot != null) {
logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot
);
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot);
} else {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
}
}
// Multi-write requires both the client policy opting in and the account supporting it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the caches and service-configuration reader created during initialization
 * into the gateway store model so subsequent gateway requests use them.
 */
private void updateGatewayProxy() {
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Captures this client's collection cache into the given snapshot so a future client
 * can be bootstrapped without re-fetching collection metadata.
 *
 * @param state the snapshot to populate
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up direct (TCP) connectivity: the global address resolver, the store client
 * factory, and finally the server store model. Argument order of the two constructor
 * calls below is positional and must not be changed.
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
// true: subscribe to RNTBD endpoint status updates.
this.createStoreModel(true);
}
/**
 * Adapts this client to the {@link DatabaseAccountManagerInternal} interface used by
 * the global endpoint manager; each call on the adapter delegates straight back to
 * the enclosing client instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; overridable in tests. Simply forwards all
 * collaborators to the {@link RxGatewayStoreModel} constructor.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from the effective connection policy. When
 * connection sharing across clients is enabled, a process-wide shared instance is
 * reused; otherwise a dedicated client is created and its settings are recorded in
 * the diagnostics config.
 */
private HttpClient httpClient() {
    final HttpClientConfig config = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (!this.connectionSharingAcrossClientsEnabled) {
        this.diagnosticsClientConfig.withGatewayHttpClientConfig(config.toDiagnosticsString());
        return HttpClient.createFixed(config);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(config, this.diagnosticsClientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a {@link ServerStoreModel}.
 *
 * @param subscribeRntbdStatus NOTE(review): unused in this body — presumably consumed
 *        by an overload of {@code createStoreClient} elsewhere; verify before removal.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Service endpoint this client was constructed against. */
@Override
public URI getServiceEndpoint() {
    return serviceEndpoint;
}
/** Effective connection policy (the supplied one, or the direct-mode default). */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return connectionPolicy;
}
/** Whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/** Consistency level this client was configured with. */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
/** Client telemetry collector instance. */
@Override
public ClientTelemetry getClientTelemetry() {
    return clientTelemetry;
}
/** Correlation id chosen at construction (caller-supplied or zero-padded client id). */
@Override
public String getClientCorrelationId() {
    return clientCorrelationId;
}
/**
 * Machine id recorded in the diagnostics client config, or {@code null} when no
 * diagnostics config is available.
 */
@Override
public String getMachineId() {
    DiagnosticsClientConfig config = this.diagnosticsClientConfig;
    return config == null ? null : ClientTelemetry.getMachineId(config);
}
/** Full user-agent string, including any configured suffix. */
@Override
public String getUserAgent() {
    return userAgentContainer.getUserAgent();
}
/** Most recent {@link CosmosDiagnostics} produced by {@link #createDiagnostics()}. */
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
    return this.mostRecentlyCreatedDiagnostics.get();
}
/**
 * Creates a database as an async operation, wrapping the internal call in a
 * fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create Database request: validates input, serializes the
 * payload (recording serialization timings as diagnostics), and maps the raw
 * response to a typed {@link ResourceResponse}. Synchronous failures are converted
 * into error Monos so callers get a uniform reactive contract.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the JSON serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database as an async operation, wrapping the internal call in a
 * fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Delete Database request for the given link. Synchronous
 * failures (e.g. an empty link) are converted into error Monos.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database as an async operation, wrapping the internal call in a
 * fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Read Database request for the given link. Synchronous
 * failures (e.g. an empty link) are converted into error Monos.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the database feed for this account as a paged Flux.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a child resource type onto the feed/query link for
 * that child collection (e.g. {@code dbs/db1} + DocumentCollection -> {@code dbs/db1/colls}).
 * Database and Offer feeds are account-rooted, so the parent link is ignored for them.
 *
 * @throws IllegalArgumentException for resource types without a query feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childSegment);
}
/**
 * Extracts the operation context/listener pair attached to query options, or
 * {@code null} when no options are supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/**
 * Extracts the operation context/listener pair attached to request options, or
 * {@code null} when no options are supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload that uses this client as the diagnostics factory.
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Core query entry point: resolves the feed link, picks (or generates) a correlation
 * activity id, wires the invalid-partition retry policy and a scoped diagnostics
 * factory, and merges diagnostics into the operation state on every terminal path
 * (next element, error, cancellation).
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Honor a caller-supplied correlation activity id; otherwise mint a fresh one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
// Set when the end-to-end timeout fires so downstream stages can tell
// cancellation-by-timeout apart from other errors.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Builds the query execution context and maps its pages: attaches query info to each
 * page (and query-plan diagnostics to the first page only), then, when an end-to-end
 * latency policy is enabled, wraps the pipeline with the timeout handling flux.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
// Query-plan diagnostics must only be attached to the first emitted page.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsClientContext);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Attaches the best-available diagnostics to a query exception (typically raised by a
 * timeout/cancellation). Prefers the most recently created diagnostics instance;
 * otherwise merges the diagnostics of all requests cancelled for this operation into
 * a single aggregate before attaching it.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, mostRecentlyCreatedDiagnostics);
        return;
    }

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor.getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics == null || cancelledRequestDiagnostics.isEmpty()) {
        return;
    }

    CosmosDiagnostics aggregatedCosmosDiagnostics = cancelledRequestDiagnostics
        .stream()
        .reduce((first, toBeMerged) -> {
            ClientSideRequestStatistics clientSideRequestStatistics =
                ImplementationBridgeHelpers
                    .CosmosDiagnosticsHelper
                    .getCosmosDiagnosticsAccessor()
                    .getClientSideRequestStatisticsRaw(first);

            // BUGFIX: the original read the statistics of 'first' twice, merging a
            // diagnostics instance with itself and silently dropping 'toBeMerged'.
            ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                ImplementationBridgeHelpers
                    .CosmosDiagnosticsHelper
                    .getCosmosDiagnosticsAccessor()
                    .getClientSideRequestStatisticsRaw(toBeMerged);

            if (clientSideRequestStatistics == null) {
                return toBeMerged;
            } else {
                clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                return first;
            }
        })
        .get(); // safe: list verified non-empty above

    BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
}
/**
 * Applies the end-to-end operation timeout to a query page flux, converting Reactor's
 * {@link TimeoutException} into the appropriate Cosmos cancellation exception and
 * folding the merged diagnostics into it.
 *
 * NOTE(review): the negative-timeout branch still calls {@code .timeout(endToEndTimeout)}
 * with a negative duration before mapping to the "negative timeout" exception —
 * confirm Reactor's behavior for negative durations matches the intent here.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout,
DiagnosticsClientContext diagnosticsClientContext) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
cancellationException.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(
requestOptions, cancellationException, diagnosticsClientContext);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
return exception;
}
return throwable;
});
}
/**
 * Queries databases from a raw query string by delegating to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return this.queryDatabases(new SqlQuerySpec(query), state);
}
/**
 * Queries databases at the account root using the shared query pipeline.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a collection as an async operation, wrapping the internal call in a
 * fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create Collection request: validates inputs, serializes the
 * payload (recording serialization timings as diagnostics), and on success records
 * the returned session token so subsequent session reads observe this write.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Time the JSON serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Capture the session token of the freshly created collection.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a collection as an async operation, wrapping the internal call in a
 * fresh per-request retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Replaces (PUT) a collection definition. Validation and serialization happen eagerly inside the
// try block so any failure is surfaced as Mono.error rather than a synchronous throw.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
// Rejects ids with illegal characters / trailing spaces before hitting the wire.
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Time the payload serialization so it can be reported in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
// Give the retry policy a chance to stamp per-request state before the first attempt.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// On success, record the returned session token against the collection's resource id.
// Guarded with a null check because the response body may be absent.
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the document collection identified by the given link.
 *
 * @param collectionLink link (self link or alt link) of the collection to delete.
 * @param options request options, may be null.
 * @return a Mono emitting the service response of the delete.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the DELETE request for a collection; argument failures surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final String resourcePath = Utils.joinPath(collectionLink, null);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Executes a DELETE against the store once date/auth headers have been populated (on subscription).
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // Close out the retry-context timing window if this is a retried attempt.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Bulk "delete all items by partition key" is modeled as a POST on the backend.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            // Close out the retry-context timing window if this is a retried attempt.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Executes a GET (point read) against the store after headers are populated on subscription.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // Close out the retry-context timing window if this is a retried attempt.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Feed reads are plain GETs; header population is deferred until subscription.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
// Queries go out as POSTs; the session token from the reply is captured before it is handed back.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> this.getStoreProxy(populatedRequest)
            .processMessage(populatedRequest)
            .map(serviceResponse -> {
                this.captureSessionToken(populatedRequest, serviceResponse);
                return serviceResponse;
            }));
}
/**
 * Reads a document collection by link.
 *
 * @param collectionLink link of the collection to read.
 * @param options request options, may be null.
 * @return a Mono emitting the read collection.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the GET request for a single collection; argument failures surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
// Give the retry policy a chance to stamp per-request state before the first attempt.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of all collections under the given database.
 *
 * @param databaseLink link of the owning database; must be non-empty.
 * @param state per-operation query/feed state.
 * @return a Flux of feed pages of collections.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String collectionsPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsPath);
}
/**
 * Queries collections under a database using raw SQL text.
 * Convenience overload that wraps the text in a {@link SqlQuerySpec}.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections under a database using a parameterized query spec.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal, e.g. {@code ["a",1,{"x":2}]}.
 * JsonSerializable values use their own serializer; everything else goes through the shared mapper.
 *
 * @param objectArray the parameter values, in call order.
 * @return the JSON array string to send as the procedure body.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final String[] serialized = new String[objectArray.size()];
    for (int index = 0; index < objectArray.size(); ++index) {
        final Object param = objectArray.get(index);
        if (param instanceof JsonSerializable) {
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Assembles the per-request HTTP headers from client defaults plus the supplied RequestOptions.
// Later sources override earlier ones: client-level settings first, then custom option headers,
// then the explicit option fields.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
// Multi-region write accounts opt in to tentative writes.
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
// Client-level consistency default; may be overridden below by options.getConsistencyLevel().
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
// No options: only the content-response-on-write preference can still apply.
if (options == null) {
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
// Caller-supplied raw headers go in first so the typed option fields below win on conflict.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request content-response-on-write overrides the client-level default.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
// Ask the service not to echo the document body on writes when responses are not wanted.
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
// Optimistic-concurrency preconditions.
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if (options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
// Trigger lists are sent as comma-joined names.
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Legacy throughput fields: an explicit offer throughput wins over an offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties path (manual vs. autoscale); only consulted when no legacy
// offer throughput was given above.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
// Fixed (manual) throughput and autoscale settings are mutually exclusive.
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
// Integrated-cache (dedicated gateway) knobs.
if (options.getDedicatedGatewayRequestOptions() != null) {
if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
}
}
return headers;
}
/**
 * Exposes the factory used to build per-operation session-token-reset retry policies.
 *
 * @return the client's retry-policy factory.
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    final IRetryPolicyFactory retryPolicyFactory = this.resetSessionTokenRetryPolicy;
    return retryPolicyFactory;
}
// Resolves the target collection asynchronously, then stamps the partition key header
// derived from the given Document onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
            return request;
        });
}
// Overload taking an already-in-flight collection resolution; stamps the partition key
// header once the collection is available.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
        return request;
    });
}
// Computes the effective PartitionKeyInternal for the request and writes it to the
// x-ms-documentdb-partitionkey header. Precedence, checked in order:
//   1. explicit PartitionKey.NONE in options; 2. explicit partition key in options;
//   3. non-partitioned collection -> empty key; 4. extracted from the document payload;
//   otherwise the operation is rejected.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Migrated (non-partitioned) collections get the empty partition key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
// Extract the key from the document body, materializing an InternalObjectNode from
// whichever representation is available.
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// rewind: the buffer may already have been consumed by serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Time the extraction so it shows up in serialization diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
// Header value must be ASCII-safe; escape any non-ASCII characters in the JSON form.
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the service request for a document create/upsert-style point operation:
// serializes the payload (recording serialization diagnostics), constructs the request,
// wires retry/E2E-timeout hooks, and finally resolves the collection to stamp the
// partition key header. The returned Mono completes once the PK header is populated.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Serialize eagerly and time it for diagnostics; a trackingId (when present) is embedded
// into the payload to support non-idempotent write retries.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
// Hook lets the end-to-end timeout policy mark this request cancelled, and propagate
// any caller-requested region exclusions.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized batch body,
 * records serialization diagnostics, wires the E2E-timeout/region-exclusion hooks, and resolves
 * the collection so batch routing headers can be stamped.
 *
 * @param requestRetryPolicy retry policy notified before the first attempt; may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param serverBatchRequest the batch whose request body is already serialized; must be non-null.
 * @param options request options, may be null.
 * @param disableAutomaticIdGeneration unused here; ids were fixed when the batch was built.
 * @return a Mono emitting the fully prepared request once the collection is resolved.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already JSON text; just wrap its UTF-8 bytes, timing it for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Hook lets the end-to-end timeout policy mark this request cancelled, and propagate
        // any caller-requested region exclusions.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // NOTE: a second, identical "if (options != null) setExcludeRegions" block was removed here;
    // the exclusion list is already propagated above.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Stamps routing and batch-semantics headers onto a batch request. Single-partition-key
// batches route by partition key value; partition-key-range batches route by range identity.
// Any other ServerBatchRequest subtype is rejected.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// NONE maps to the collection-specific "none" key (migrated collections).
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
// Header value must be ASCII-safe; escape any non-ASCII characters in the JSON form.
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
// Batch-wide semantics: atomicity and continue-on-error behavior, plus the operation count.
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * Populates the common request headers: date, key/resource-token authorization,
 * content-type/accept, SDK capabilities, and (when applicable) feed-range filtering
 * and AAD authorization.
 * <p>
 * NOTE: the caller must subscribe to the returned Mono for the headers to actually be
 * populated; the date/signature are computed per subscription so retried attempts
 * get fresh values.
 *
 * @param request request to populate headers on.
 * @param httpMethod http method used for authorization signing and content-type selection.
 * @return a Mono which, on subscription, populates the headers and emits the same request.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key / resource-token / token-resolver auth is computed synchronously here; AAD auth
// (which requires an async token fetch) is handled in populateAuthorizationHeader below.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The signature may contain characters that are not header-safe; URL-encode it.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default content-type/accept headers; never clobber caller-provided values.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range-scoped reads need range headers resolved (collection + pk-range caches)
// before the (possibly AAD) authorization header is finalized.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities; never clobbers a value the caller already set.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    final Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
// Only document/conflict feed-style reads (ReadFeed/Query/SqlQuery) that actually carry a
// feed range need range-filtering headers.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    final ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}
/**
 * Populates the AUTHORIZATION header on a service request when AAD auth is in use.
 * For key/resource-token auth the header was already computed synchronously upstream,
 * so the request passes through unchanged.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    // AAD tokens require an async fetch (cached) before the header can be set.
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Populates the AUTHORIZATION header on raw HTTP headers when AAD auth is in use;
 * otherwise the headers pass through unchanged.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * @return the authorization scheme this client was configured with
 *         (master key, resource token, AAD, ...).
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    final AuthorizationTokenType tokenType = this.authorizationTokenType;
    return tokenType;
}
// Produces the authorization token for a request. Precedence of auth sources:
//   1. custom token resolver; 2. key credential (HMAC signature);
//   3. plain resource token; 4. per-resource token map
//      (with the first permission-feed token used for account-level reads).
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Properties are passed read-only so the resolver cannot mutate request state.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is sent as-is.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
// Maps an internal ResourceType onto the public CosmosResourceType enum,
// falling back to SYSTEM when no public mapping exists.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    final CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token returned by the service so later session-consistent
// requests can replay it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    final Map<String, String> responseHeaders = response.getResponseHeaders();
    this.sessionContainer.setSessionToken(request, responseHeaders);
}
// Executes a POST (create) against the store once headers are populated on subscription.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            // Close out the retry-context timing window if this is a retried attempt.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Upserts are POSTs flagged with the IS_UPSERT header; the session token from the reply
// is captured before it is handed back.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            // Close out the retry-context timing window if this is a retried attempt.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(serviceResponse -> {
                    this.captureSessionToken(populatedRequest, serviceResponse);
                    return serviceResponse;
                });
        });
}
// Executes a PUT (replace) against the store once headers are populated on subscription.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            // Close out the retry-context timing window if this is a retried attempt.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Dispatches a Patch (PATCH) request to the backend store.
 * When this invocation is a retry attempt, the retry context's end time is
 * refreshed immediately before the request is handed to the store proxy.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetryAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetryAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            return storeProxy.processMessage(populatedRequest);
        });
}
/**
 * Creates a document in the given collection, applying the client's
 * availability strategy (e.g. hedging across regions) around the core
 * create implementation.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (requestOptions, endToEndConfig, clientContextOverride) -> createDocumentCore(
            collectionLink,
            document,
            requestOptions,
            disableAutomaticIdGeneration,
            endToEndConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
// Core implementation of document create: wires up the session-token-reset
// retry policy, a scoped diagnostics factory and the end-to-end timeout
// before delegating to createDocumentInternal.
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Scoped factory aggregates diagnostics across all attempts of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Without an explicit partition key the collection cache may be stale; wrap
    // with a policy that refreshes the cache on a partition-key mismatch.
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    // Effectively-final copy required for capture by the lambda below.
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Builds the create-document request and executes it, mapping the raw service
// response to a typed ResourceResponse<Document>. Synchronous failures while
// building the request are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Wraps a point-operation Mono with the end-to-end operation timeout when an
// enabled policy is present:
// - a negative configured timeout fails fast with an OperationCancelledException
//   carrying the NEGATIVE_TIMEOUT_PROVIDED sub-status;
// - otherwise Mono.timeout(...) enforces the budget and any TimeoutException is
//   mapped to an OperationCancelledException with diagnostics attached.
// With no enabled policy the operation passes through untouched.
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {
    // Record the effective policy on the options so downstream code can read it.
    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Ensure at least one diagnostics instance exists so the exception
            // can carry a diagnostics snapshot.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }
        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    return rxDocumentServiceResponseMono;
}
// Maps a timeout raised by Mono.timeout(...) to an OperationCancelledException
// with diagnostics attached; any non-timeout error is returned unchanged.
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
    // Reactor may wrap the TimeoutException; unwrap before type-checking.
    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if (unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        // Preserve the original failure location in the substituted exception.
        exception.setStackTrace(throwable.getStackTrace());
        // Notify the in-flight request (if a callback was registered) that it was
        // cancelled due to the end-to-end timeout.
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }
        // Ensure a diagnostics snapshot exists, then attach it to the exception.
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }
        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }
    return throwable;
}
/**
 * Builds the OperationCancelledException used when a caller configured a
 * negative end-to-end timeout. The NEGATIVE_TIMEOUT_PROVIDED sub-status is
 * always set; diagnostics are attached only when a snapshot is available.
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    CosmosException exception = new OperationCancelledException(
        String.format("Negative timeout '%s' provided.", negativeTimeout),
        null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }
    return exception;
}
/**
 * Upserts a document in the given collection, applying the client's
 * availability strategy around the core upsert implementation.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (requestOptions, endToEndConfig, clientContextOverride) -> upsertDocumentCore(
            collectionLink, document, requestOptions, disableAutomaticIdGeneration, endToEndConfig, clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
// Core implementation of document upsert: wires up the session-token-reset
// retry policy, a scoped diagnostics factory and the end-to-end timeout
// before delegating to upsertDocumentInternal.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory aggregates diagnostics across all attempts of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    // Without an explicit partition key, refresh the collection cache on a
    // partition-key mismatch via the wrapping retry policy.
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    // Effectively-final copy required for capture by the lambda below.
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
// Builds the upsert request (shares request construction with create via
// getCreateDocumentRequest) and executes it, mapping the raw service response
// to a typed ResourceResponse<Document>.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs =
            getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride);
        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces the document at the given link, applying the client's
 * availability strategy around the core replace implementation.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (requestOptions, endToEndConfig, clientContextOverride) -> replaceDocumentCore(
            documentLink,
            document,
            requestOptions,
            endToEndConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
// Core implementation of replace-by-link: wires up the session-token-reset
// retry policy, a scoped diagnostics factory and the end-to-end timeout
// before delegating to replaceDocumentInternal.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    // Without an explicit partition key, derive the collection link from the
    // document link and wrap with the partition-key-mismatch retry policy.
    if (nonNullRequestOptions.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    // Effectively-final copy required for capture by the lambda below.
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Validates the inputs for a replace-by-link operation, converts the caller's
 * POJO into a {@link Document} and delegates to the link + Document overload.
 * Synchronous failures are surfaced as an error Mono rather than thrown.
 *
 * Note: {@code endToEndPolicyConfig} is not used here — the end-to-end timeout
 * is applied by the caller (replaceDocumentCore); the parameter is kept for
 * signature symmetry with the other internals.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the throwable itself so the stack trace is preserved at debug
        // level (consistent with createDocumentInternal / upsertDocumentInternal).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self link, applying the client's
 * availability strategy around the core replace implementation.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (requestOptions, endToEndConfig, clientContextOverride) -> replaceDocumentCore(
            document,
            requestOptions,
            endToEndConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
// Core implementation of replace for a Document instance; the target link is
// the document's self link.
// NOTE(review): unlike the String-overload replaceDocumentCore, this path does
// not create a ScopedDiagnosticsFactory nor wrap with
// getPointOperationResponseMonoWithE2ETimeout, and it passes the possibly-null
// 'options' straight into PartitionKeyMismatchRetryPolicy (the other overloads
// pass a guaranteed non-null RequestOptions) — confirm this asymmetry is
// intentional.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    // Effectively-final copy required for capture by the lambda below.
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Replaces a document using its self link as the target link. A null
 * {@code document} is surfaced as an error Mono carrying an
 * {@link IllegalArgumentException}.
 *
 * Note: {@code endToEndPolicyConfig} is accepted for signature symmetry with
 * the other internals and is not used on this path.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed copy/paste log text ("replacing a database" -> "replacing a
        // document") and include the throwable so the stack trace is preserved.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds and executes the Replace (PUT) request for a Document payload:
// optionally stamps a tracking id into the payload, serializes it (recording
// serialization diagnostics), resolves the collection to attach partition-key
// information, then dispatches through replace(...).
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Measure serialization time for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    if (options != null) {
        // Stamp the tracking id into the payload so this write can be correlated.
        String trackingId = options.getTrackingId();
        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook lets the E2E-timeout mapper mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);
    // NOTE(review): the lambda ignores 'req' and reuses the outer 'request' —
    // presumably addPartitionKeyInformation mutates and returns the same
    // instance; confirm, otherwise partition-key info could be dropped.
    return requestObs
        .flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Resolves the effective end-to-end latency policy for a request: the
 * per-request override when present, otherwise the client-level default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null;
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Returns the given policy when non-null, otherwise falls back to the
 * client-level default end-to-end latency policy.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies the given patch operations to the document at the given link,
 * applying the client's availability strategy around the core implementation.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (requestOptions, endToEndConfig, clientContextOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            requestOptions,
            endToEndConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
// Core implementation of document patch: wires up the session-token-reset
// retry policy, a scoped diagnostics factory and the end-to-end timeout
// before delegating to patchDocumentInternal.
// Note: no PartitionKeyMismatchRetryPolicy wrapping here, unlike the
// create/upsert/replace cores.
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory aggregates diagnostics across all attempts of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Builds and executes the Patch (PATCH) request: serializes the patch
// operations (recording serialization diagnostics), resolves the collection
// to attach partition-key information, then dispatches through patch(...).
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    // Measure serialization time for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    // NOTE(review): other *Internal methods wrap clientContextOverride with
    // getEffectiveClientContext(...) before creating the request; confirm the
    // direct use here is intentional.
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook lets the E2E-timeout mapper mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    // NOTE(review): the lambda ignores 'req' and reuses the outer 'request' —
    // presumably addPartitionKeyInformation returns the same instance; confirm.
    return requestObs
        .flatMap(req -> patch(request, retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes the document at the given link, applying the client's availability
 * strategy around the core delete implementation.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, endToEndConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            null,
            requestOptions,
            endToEndConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Deletes the document at the given link, additionally supplying the item's
 * node so the partition key can be derived from it; applies the client's
 * availability strategy around the core delete implementation.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Non-idempotent write retries are only enabled when the caller opted in.
    boolean nonIdempotentRetriesEnabled = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, endToEndConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            requestOptions,
            endToEndConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
// Core implementation of document delete: wires up the session-token-reset
// retry policy, a scoped diagnostics factory and the end-to-end timeout
// before delegating to deleteDocumentInternal.
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory aggregates diagnostics across all attempts of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                nonNullRequestOptions,
                requestRetryPolicy,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Builds and executes the Delete request for a document link: resolves the
// collection to attach partition-key information (using the supplied
// internalObjectNode when available), then dispatches through delete(...).
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Hook lets the E2E-timeout mapper mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);
        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // NOTE(review): throwable is not passed to the logger here, so the stack
        // trace is lost at debug level (createDocumentInternal does pass it).
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Deletes all documents within a logical partition via the PartitionKey
// delete operation.
// NOTE(review): the 'partitionKey' parameter is not forwarded to the internal
// method — presumably the partition key is carried inside 'options'; confirm,
// otherwise the argument is silently ignored.
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and executes the PartitionKey-scoped Delete request (deletes every
// item in one logical partition): resolves the collection to attach
// partition-key information, then dispatches through
// deleteAllItemsByPartitionKey(...).
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        // Partition key for the delete is resolved from 'options' by
        // addPartitionKeyInformation (no document payload is involved).
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
// Public read entry point; delegates to the private overload using this
// client instance as the diagnostics factory.
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}
/**
 * Reads a document, applying the client's availability strategy around the
 * core read implementation. The diagnostics factory is injectable so nested
 * callers (e.g. readMany) can aggregate diagnostics.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    // Reads never use non-idempotent write retries, hence the hard-coded false.
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (requestOptions, endToEndConfig, clientContextOverride) ->
            readDocumentCore(documentLink, requestOptions, endToEndConfig, clientContextOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
// Core implementation of document read: wires up the session-token-reset
// retry policy, a scoped diagnostics factory and the end-to-end timeout
// before delegating to readDocumentInternal.
private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory aggregates diagnostics across all attempts of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(
                documentLink,
                nonNullRequestOptions,
                retryPolicyInstance,
                scopedDiagnosticsFactory),
            retryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and executes the Read request for a document link: resolves the
 * collection to attach partition-key information, then dispatches through
 * read(...). Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Guard against null options for consistency with the other *Internal
        // methods (the current caller always supplies a non-null RequestOptions,
        // so behavior is unchanged for existing call sites).
        if (options != null) {
            // Hook lets the E2E-timeout mapper mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req ->
            this.read(request, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Include the throwable so the stack trace is preserved at debug level
        // (consistent with createDocumentInternal).
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection. Implemented as an unfiltered
 * {@code SELECT * FROM r} query over the collection.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String readAllQueryText = "SELECT * FROM r";
    return queryDocuments(collectionLink, readAllQueryText, state, classOfT);
}
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
itemIdentityList
.forEach(itemIdentity -> {
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds, per partition-key range, the SQL query that fetches the items mapped to that
 * range. Ranges holding exactly one item are omitted — those are served via point reads.
 *
 * @param partitionRangeItemKeyMap requested items grouped by owning partition range
 * @param partitionKeyDefinition the container's partition key definition
 * @return map from partition range to the query spec covering its items (multi-item ranges only)
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    String pkSelector = createPkSelector(partitionKeyDefinition);

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> rangeAndItems
             : partitionRangeItemKeyMap.entrySet()) {

        List<CosmosItemIdentity> itemsInRange = rangeAndItems.getValue();
        // Single-item ranges are handled via point reads, so no query is generated for them.
        if (itemsInRange.size() <= 1) {
            continue;
        }

        SqlQuerySpec querySpec;
        if (pkSelector.equals("[\"id\"]")) {
            // Partition key path is /id itself -> a simple IN clause on c.id suffices.
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(itemsInRange, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            // Hierarchical partition key -> per-sub-key equality terms.
            querySpec = createReadManyQuerySpecMultiHash(itemsInRange, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(itemsInRange, pkSelector);
        }
        queriesByRange.put(rangeAndItems.getKey(), querySpec);
    }
    return queriesByRange;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for containers whose partition
 * key path is /id, so matching on the id alone is sufficient.
 *
 * @param idPartitionKeyPairList identities to read (only the ids are used)
 * @param partitionKeySelector unused here; kept for signature parity with sibling builders
 * @return parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> sqlParameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");

    int total = idPartitionKeyPairList.size();
    for (int index = 0; index < total; index++) {
        String paramName = "@param" + index;
        sqlParameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(index).getId()));
        query.append(paramName);
        if (index + 1 < total) {
            query.append(", ");
        }
    }
    query.append(" )");

    return new SqlQuerySpec(query.toString(), sqlParameters);
}
/**
 * Builds a disjunction query matching each (id, partition key) pair:
 * {@code SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @p) OR ... )}.
 *
 * @param itemIdentities identities to read
 * @param partitionKeySelector bracketed partition-key selector, e.g. {@code ["pk"]}
 * @return parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> sqlParameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");

    int total = itemIdentities.size();
    for (int index = 0; index < total; index++) {
        CosmosItemIdentity identity = itemIdentities.get(index);

        // Two parameters per item: even slot holds the partition key, odd slot the id.
        String pkParamName = "@param" + (2 * index);
        String idParamName = "@param" + (2 * index + 1);
        sqlParameters.add(new SqlParameter(pkParamName,
            ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        sqlParameters.add(new SqlParameter(idParamName, identity.getId()));

        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
        if (index + 1 < total) {
            query.append(" OR ");
        }
    }
    query.append(" )");

    return new SqlQuerySpec(query.toString(), sqlParameters);
}
/**
 * Builds a disjunction query for containers with a MULTI_HASH (hierarchical) partition key:
 * each item contributes a "(c.id = @p AND c.path1 = @p AND c.path2 = @p ...)" term.
 *
 * @param itemIdentities identities to read
 * @param partitionKeyDefinition definition supplying the ordered sub-partition-key paths
 * @return parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    // Parameter names are numbered globally across all items and all sub-keys.
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // NOTE(review): assumes the hierarchical partition-key value is a String whose
        // components are joined by '=' and that no component itself contains '=' —
        // TODO confirm against the PartitionKey serialization contract.
        String pkValueString = (String) pkValue;
        // Each entry pairs one partition-key path with the parameter name bound to its value.
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));

        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // substring(1) strips the leading '/' from the partition-key path.
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }
        queryStringBuilder.append(" )");

        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Converts the partition-key paths into a bracketed property selector that is appended
 * to the document alias, e.g. paths ["/a", "/b"] become ["a"]["b"] (used as c["a"]["b"]).
 *
 * @param partitionKeyDefinition definition providing the ordered partition-key paths
 * @return concatenated ["..."] selector segments
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        // Drop the leading '/' of each path segment.
        .map(pathPart -> StringUtils.substring(pathPart, 1))
        // Escape embedded double quotes so the ["..."] selector stays well-formed.
        // Fix: previously the quote was replaced by a bare backslash ("\\"), which
        // dropped the quote and produced a malformed selector for quoted segments.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\\""))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Runs the query leg of a readMany operation: executes one parameterized query per
 * partition-key range that contains more than one requested item.
 *
 * @param diagnosticsFactory scoped diagnostics factory for this readMany call
 * @param parentResourceLink link of the parent collection
 * @param sqlQuery placeholder query (the effective per-range queries come from rangeQueryMap)
 * @param options query request options
 * @param klass result resource type
 * @param resourceTypeEnum resource type being queried
 * @param collection resolved collection metadata
 * @param rangeQueryMap per-range query specs; empty means nothing to query
 * @return pages of query results, possibly wrapped with an end-to-end timeout
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    // Every requested item was served by a point read; no query needed.
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }

    UUID activityId = randomUuid();
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

    RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .toRequestOptions(options);

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions);

    // When an end-to-end latency policy is active, enforce its timeout on the whole flux.
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }

    return feedResponseFlux;
}
/**
 * Runs the point-read leg of a readMany operation: each partition range that holds exactly
 * one requested item is served with a document read instead of a query, and the result
 * (or a benign 404) is converted into a single-item feed response so it can be merged
 * with the query results.
 * NOTE(review): the document link is built as resourceLink + id — assumes resourceLink
 * ends with a path separator; confirm against the caller.
 *
 * @param diagnosticsFactory scoped diagnostics factory for this readMany call
 * @param singleItemPartitionRequestMap requested items grouped by owning partition range
 * @param resourceLink link prefix used to address individual documents
 * @param queryRequestOptions options converted to per-read request options
 * @param klass item type used for deserialization
 * @return one single-item (or empty, on 404) feed response per point read
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            // Only single-item ranges are point-read; multi-item ranges go through queries.
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            // A plain 404 (no sub-status) means "item missing" — readMany
                            // treats it as an empty result rather than a failure.
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;

            if (cosmosException != null) {
                // Missing item: emit an empty page but keep the request statistics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }

            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents using a raw query string by delegating to the {@code SqlQuerySpec}
 * overload.
 *
 * @param collectionLink link of the target collection
 * @param query raw SQL query text
 * @param state feed-operation state carrying options and diagnostics
 * @param classOfT type the result documents are deserialized into
 * @return pages of matching documents
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Builds the {@code IDocumentQueryClient} facade the query pipeline uses, backed by this
 * client's caches, consistency settings, and transport.
 * NOTE(review): the {@code rxDocumentClientImpl} parameter is unused — the anonymous class
 * captures the enclosing {@code RxDocumentClientImpl.this} directly; confirm whether the
 * parameter can be dropped at the call sites.
 *
 * @param rxDocumentClientImpl unused; the enclosing instance is captured instead
 * @param operationContextAndListenerTuple optional listener: when present, query requests,
 *        responses, and errors are reported to it
 * @return query client facade
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return new IDocumentQueryClient () {

        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Tag the request with the correlation id and notify the listener around
                // the request/response/error lifecycle.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);

                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }

        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

            // Delegates to the enclosing client's availability-strategy implementation.
            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }

        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // Not supported by this facade; callers in the query pipeline do not use it.
            return null;
        }
    };
}
/**
 * Queries documents with a parameterized query spec; logs the query before delegating to
 * the shared query pipeline.
 *
 * @param collectionLink link of the target collection
 * @param querySpec parameterized query
 * @param state feed-operation state carrying options and diagnostics
 * @param classOfT type the result documents are deserialized into
 * @return pages of matching documents
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Starts a change-feed query against the given collection.
 *
 * @param collection target collection (must not be null)
 * @param changeFeedOptions change-feed request options
 * @param classOfT type the change-feed items are deserialized into
 * @return pages of change-feed results
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");

    ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);

    return changeFeedQuery.executeAsync();
}
/**
 * Adapter overload used by the paged-flux path: extracts the change-feed options from the
 * operation state and delegates to {@link #queryDocumentChangeFeed}.
 *
 * @param collection target collection
 * @param state operation state carrying the change-feed options
 * @param classOfT type the change-feed items are deserialized into
 * @return pages of change-feed results
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    CosmosChangeFeedRequestOptions changeFeedOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, changeFeedOptions, classOfT);
}
/**
 * Reads all documents of one logical partition by issuing a partition-scoped scan query
 * against the single physical partition that owns the partition key.
 *
 * @param collectionLink link of the target collection (required)
 * @param partitionKey logical partition to scan (required)
 * @param state feed-operation state carrying options and diagnostics
 * @param classOfT type the documents are deserialized into
 * @return pages of all documents in the logical partition
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    // Clone so the caller's options are not mutated (the range id is injected below).
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());

    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);

    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);

    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // No cross-region speculation: diagnostics only need merging on completion.
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx));
    } else {
        // With speculation the factory is reset before each attempt.
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
    }

    // This request exists only to resolve collection metadata / routing below.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

        // Retries lookup+query when the partition was split or the name cache is stale.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();

                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Resolve the single physical partition that owns this logical partition.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        diagnosticsFactory,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }

        // With cross-region speculation, fold the scoped diagnostics back into the caller's
        // options on every terminal path: success, error, and cancellation.
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}
/**
 * Exposes the client-wide cache of partitioned query execution plans, keyed by query text.
 * NOTE(review): this returns the live internal map, not a copy — callers can mutate it.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition-key-range feed of a collection (state-based overload).
 *
 * @param collectionLink link of the collection (required)
 * @param state feed-operation state carrying options and diagnostics
 * @return pages of partition key ranges
 * @throws IllegalArgumentException when collectionLink is empty
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Reads the partition-key-range feed of a collection (options-based overload).
 *
 * @param collectionLink link of the collection (required)
 * @param options query request options
 * @return pages of partition key ranges
 * @throws IllegalArgumentException when collectionLink is empty
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Assembles the service request for a stored-procedure operation under a collection.
 *
 * @param collectionLink parent collection link (required)
 * @param storedProcedure stored-procedure payload (required; validated)
 * @param options request options contributing headers
 * @param operationType operation to perform
 * @return the populated service request
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }

    validateResource(storedProcedure);

    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Assembles the service request for a user-defined-function operation under a collection.
 *
 * @param collectionLink parent collection link (required)
 * @param udf user-defined-function payload (required; validated)
 * @param options request options contributing headers
 * @param operationType operation to perform
 * @return the populated service request
 * @throws IllegalArgumentException when collectionLink is empty or udf is null
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }

    validateResource(udf);

    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure in the given collection, with session-token-reset retries.
 *
 * @param collectionLink target collection link
 * @param storedProcedure stored procedure to create
 * @param options request options
 * @return the created stored procedure wrapped in a resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the create-stored-procedure request; synchronous failures while
 * building the request are surfaced as an error Mono.
 *
 * @param collectionLink target collection link
 * @param storedProcedure stored procedure to create
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return the created stored procedure wrapped in a resource response
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        // Argument/validation errors are converted to an error signal instead of throwing.
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure, with session-token-reset retries.
 *
 * @param storedProcedure stored procedure carrying its self link and new body
 * @param options request options
 * @return the replaced stored procedure wrapped in a resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the replace-stored-procedure request; synchronous failures while
 * building the request are surfaced as an error Mono.
 *
 * @param storedProcedure stored procedure carrying its self link and new body (required)
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return the replaced stored procedure wrapped in a resource response
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        // Replace addresses the resource via its self link rather than the collection path.
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure, with session-token-reset retries.
 *
 * @param storedProcedureLink link of the stored procedure to delete
 * @param options request options
 * @return the deletion result wrapped in a resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the delete-stored-procedure request; synchronous failures while
 * building the request are surfaced as an error Mono.
 *
 * @param storedProcedureLink link of the stored procedure to delete (required)
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return the deletion result wrapped in a resource response
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure, with session-token-reset retries.
 *
 * @param storedProcedureLink link of the stored procedure to read
 * @param options request options
 * @return the stored procedure wrapped in a resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the read-stored-procedure request; synchronous failures while
 * building the request are surfaced as an error Mono.
 *
 * @param storedProcedureLink link of the stored procedure to read (required)
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return the stored procedure wrapped in a resource response
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored-procedure feed of a collection.
 *
 * @param collectionLink link of the collection (required)
 * @param state feed-operation state carrying options and diagnostics
 * @return pages of stored procedures
 * @throws IllegalArgumentException when collectionLink is empty
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
/**
 * Queries stored procedures using a raw query string by delegating to the
 * {@code SqlQuerySpec} overload.
 *
 * @param collectionLink link of the collection
 * @param query raw SQL query text
 * @param state feed-operation state carrying options and diagnostics
 * @return pages of matching stored procedures
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures with a parameterized query spec via the shared query pipeline.
 *
 * @param collectionLink link of the collection
 * @param querySpec parameterized query
 * @param state feed-operation state carrying options and diagnostics
 * @return pages of matching stored procedures
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure, with session-token-reset retries.
 *
 * @param storedProcedureLink link of the stored procedure to execute
 * @param options request options
 * @param procedureParams arguments passed to the procedure (may be null or empty)
 * @return the stored procedure execution response
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch against a collection, with session-token-reset retries.
 *
 * @param collectionLink target collection link
 * @param serverBatchRequest the batch payload
 * @param options request options
 * @param disableAutomaticIdGeneration whether automatic id generation is disabled
 * @return the parsed batch response
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Builds and sends the execute-stored-procedure (ExecuteJavaScript) request.
 * Procedure parameters are serialized into the request body; partition-key information
 * is attached asynchronously before the request is dispatched.
 *
 * @param storedProcedureLink link of the stored procedure to execute
 * @param options request options (excluded regions and listener are honored; may be null)
 * @param procedureParams arguments serialized into the request body (may be null or empty)
 * @param retryPolicy retry policy notified before the request is sent (may be null)
 * @return the stored procedure execution response
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);

        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);

        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // Fix: operate on the request emitted by addPartitionKeyInformation ('req') instead of
        // closing over the outer 'request'. Today both refer to the same instance, but using
        // the emitted value keeps the pipeline correct if that ever changes.
        return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));

    } catch (Exception e) {
        // Argument/serialization errors are converted to an error signal instead of throwing.
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds and sends the transactional-batch request and parses the service response;
 * synchronous failures while building the request are surfaced as an error Mono.
 *
 * @param collectionLink target collection link
 * @param serverBatchRequest the batch payload
 * @param options request options
 * @param requestRetryPolicy retry policy applied to the request
 * @param disableAutomaticIdGeneration whether automatic id generation is disabled
 * @return the parsed batch response
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());

        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));

        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));

    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger in the given collection, with session-token-reset retries.
 *
 * @param collectionLink target collection link
 * @param trigger trigger to create
 * @param options request options
 * @return the created trigger wrapped in a resource response
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a trigger; the retry policy (if any) is notified
 * before the request is sent.
 *
 * @return a Mono emitting the created trigger's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting the triggers
 * feed of the given collection for the supplied operation type.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    String triggersPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, triggersPath, trigger, headers, options);
}
/**
 * Replaces an existing trigger, wrapping the operation with a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a trigger addressed by its self link; the retry
 * policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the replaced trigger's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the trigger addressed by the given link, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a trigger addressed by the given link; the retry
 * policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the deleted trigger's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger addressed by the given link, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a trigger addressed by the given link; the retry
 * policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the trigger's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all triggers of a collection as a paged feed. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedLink);
}
/** Queries triggers with a raw SQL string by delegating to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
/** Queries triggers of a collection using the given SQL query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in the given collection, wrapping the operation with a
 * fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user-defined function; the retry policy (if any)
 * is notified before the request is sent.
 *
 * @return a Mono emitting the created UDF's resource response, or an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing user-defined function, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user-defined function addressed by its self
 * link; the retry policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the replaced UDF's resource response, or an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the user-defined function addressed by the given link, wrapping the operation
 * with a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a user-defined function addressed by the given
 * link; the retry policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the deleted UDF's resource response, or an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user-defined function addressed by the given link, wrapping the operation
 * with a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user-defined function addressed by the given
 * link; the retry policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the UDF's resource response, or an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all user-defined functions of a collection as a paged feed. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedLink);
}
/** Queries UDFs with a raw SQL string by delegating to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
/** Queries user-defined functions of a collection using the given SQL query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads the conflict addressed by the given link, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the Read request for a conflict, resolves partition-key information asynchronously,
 * then issues the read; the retry policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the conflict's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda parameter 'req' is unused and the outer 'request' is used
        // instead — presumably addPartitionKeyInformation mutates and returns the same
        // instance; confirm before relying on 'req' differing from 'request'.
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all conflicts of a collection as a paged feed. */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedLink);
}
/** Queries conflicts with a raw SQL string by delegating to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
/** Queries conflicts of a collection using the given SQL query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes the conflict addressed by the given link, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the Delete request for a conflict, resolves partition-key information
 * asynchronously, then issues the delete; the retry policy (if any) is notified before the
 * request is sent.
 *
 * @return a Mono emitting the deleted conflict's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): as in readConflictInternal, the lambda parameter 'req' is unused and
        // the outer 'request' is used — presumably the same instance; confirm.
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a user in the given database, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user.
 *
 * @return a Mono emitting the created user's resource response, or an error Mono.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user in the given database, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a user; the retry policy (if any) is notified
 * before the request is sent.
 *
 * @return a Mono emitting the upserted user's resource response, or an error Mono.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting the users feed
 * of the given database for the supplied operation type.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, usersPath, user, headers, options);
}
/**
 * Replaces an existing user, wrapping the operation with a fresh session-token-reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user addressed by its self link; the retry
 * policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the replaced user's resource response, or an error Mono.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the user addressed by the given link, wrapping the operation with a fresh
 * session-token-reset retry policy.
 * NOTE(review): unlike the sibling CRUD methods this one carries no {@code @Override} —
 * confirm whether it is declared on the client interface.
 */
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a user addressed by the given link; the retry
 * policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the deleted user's resource response, or an error Mono.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user addressed by the given link, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user addressed by the given link; the retry
 * policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the user's resource response, or an error Mono.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all users of a database as a paged feed. */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedLink);
}
/** Queries users with a raw SQL string by delegating to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
/** Queries users of a database using the given SQL query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads the client encryption key addressed by the given link, wrapping the operation
 * with a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a client encryption key; the retry policy (if any)
 * is notified before the request is sent.
 *
 * @return a Mono emitting the key's resource response, or an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a client encryption key in the given database, wrapping the operation with a
 * fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a client encryption key.
 *
 * @return a Mono emitting the created key's resource response, or an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting the client
 * encryption keys feed of the given database for the supplied operation type.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    String keysPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, keysPath, clientEncryptionKey, headers, options);
}
/**
 * Replaces an existing client encryption key (addressed by its name-based link), wrapping
 * the operation with a fresh session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a client encryption key addressed by its
 * name-based link; the retry policy (if any) is notified before the request is sent.
 *
 * @return a Mono emitting the replaced key's resource response, or an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all client encryption keys of a database as a paged feed. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedLink);
}
/** Queries client encryption keys of a database using the given SQL query spec. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission under the given user, wrapping the operation with a fresh
 * session-token-reset retry policy.
 *
 * @param userLink link of the owning user.
 * @param permission the permission to create.
 * @param options request options; may be null.
 * @return a Mono emitting the created permission's resource response, or an error Mono.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    // Fix: reuse the same retry-policy instance for both the operation and the retry wrapper.
    // Previously a second, unrelated policy instance was passed to inlineIfPossibleAsObs, so
    // the policy observing retries was not the one the request was prepared with —
    // inconsistent with every sibling method (e.g. upsertPermission, createUser).
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds and issues the Create request for a permission.
 *
 * @return a Mono emitting the created permission's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures (validation, request building) become an error Mono.
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a permission under the given user, wrapping the operation with a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a permission; the retry policy (if any) is
 * notified before the request is sent.
 *
 * @return a Mono emitting the upserted permission's resource response, or an error Mono.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono.
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
    /**
     * Replaces an existing permission (addressed via its self link), retrying per the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
    }
    // Validates the permission, builds the Replace request against its self link, and executes it.
    private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (permission == null) {
                throw new IllegalArgumentException("permission");
            }
            logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
            RxDocumentClientImpl.validateResource(permission);
            String path = Utils.joinPath(permission.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
            // Give the retry policy a chance to inspect/annotate the request before it is sent.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Deletes the permission addressed by {@code permissionLink}, retrying per the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
    }
    // Builds and executes the Delete request; synchronous failures become an error Mono.
    private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(permissionLink)) {
                throw new IllegalArgumentException("permissionLink");
            }
            logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
            String path = Utils.joinPath(permissionLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
            // Give the retry policy a chance to inspect/annotate the request before it is sent.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Reads the permission addressed by {@code permissionLink}, retrying per the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
    }
    // Builds and executes the Read request; synchronous failures become an error Mono.
    private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
        try {
            if (StringUtils.isEmpty(permissionLink)) {
                throw new IllegalArgumentException("permissionLink");
            }
            logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
            String path = Utils.joinPath(permissionLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
            // Give the retry policy a chance to inspect/annotate the request before it is sent.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Reads the feed of permissions under the given user link.
     *
     * @throws IllegalArgumentException when {@code userLink} is empty.
     */
    @Override
    public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
            Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
    }
    /** Queries permissions using a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
    @Override
    public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                           QueryFeedOperationState state) {
        return queryPermissions(userLink, new SqlQuerySpec(query), state);
    }
    /** Queries permissions under the given user link with a parameterized query spec. */
    @Override
    public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                           QueryFeedOperationState state) {
        return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
    }
    /**
     * Replaces an offer (throughput settings resource), retrying per the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
    }
    // Validates the offer, builds the Replace request against its self link, and executes it.
    private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            if (offer == null) {
                throw new IllegalArgumentException("offer");
            }
            logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
            RxDocumentClientImpl.validateResource(offer);
            String path = Utils.joinPath(offer.getSelfLink(), null);
            // Offers take no per-request options or custom headers.
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
                ResourceType.Offer, path, offer, null, null);
            return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /**
     * Reads the offer addressed by {@code offerLink}, retrying per the
     * session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
    }
    // Builds and executes the Read request for an offer; synchronous failures become an error Mono.
    private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(offerLink)) {
                throw new IllegalArgumentException("offerLink");
            }
            logger.debug("Reading an Offer. offerLink [{}]", offerLink);
            String path = Utils.joinPath(offerLink, null);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
            // Give the retry policy a chance to inspect/annotate the request before it is sent.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /** Reads the feed of all offers in the account. */
    @Override
    public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
        return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
            Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
    }
    // Convenience overload: unwraps the query options from the operation state.
    private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
        QueryFeedOperationState state,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink) {
        return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
    }
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
retryPolicy);
}
    /**
     * Core paginated read-feed implementation for non-document resources. Builds per-page
     * ReadFeed requests (carrying the continuation token and page size) and hands them to the
     * {@link Paginator} to drive the page-by-page subscription.
     */
    private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
        CosmosQueryRequestOptions options,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink,
        DocumentClientRetryPolicy retryPolicy) {

        final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
        // -1 signals "server default page size" downstream.
        int maxPageSize = maxItemCount != null ? maxItemCount : -1;
        // Document feeds go through the query pipeline, not this path.
        assert(resourceType != ResourceType.Document);

        // Invoked once per page: builds the request with the page's continuation token/size.
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };

        // Executes one page request and converts the raw response into a typed FeedResponse.
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
            request -> readFeed(request)
                .map(response -> toFeedResponsePage(
                    response,
                    ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .getItemFactoryMethod(nonNullOptions, klass),
                    klass));

        return Paginator
            .getPaginatedQueryResultAsObservable(
                nonNullOptions,
                createRequestFunc,
                executeFunc,
                maxPageSize);
    }
    /** Queries offers using a raw SQL string; delegates to the {@link SqlQuerySpec} overload. */
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
        return queryOffers(new SqlQuerySpec(query), state);
    }
    /** Queries offers account-wide (offers have no parent link, hence the null parent). */
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
    }
    /**
     * Reads the database account metadata, retrying per the session-token-reset retry policy.
     */
    @Override
    public Mono<DatabaseAccount> getDatabaseAccount() {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
            documentClientRetryPolicy);
    }
    // Builds and executes the DatabaseAccount read (empty resource path addresses the account root).
    private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Getting Database Account");
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read,
                ResourceType.DatabaseAccount, "",
                (HashMap<String, String>) null,
                null);
            return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
        } catch (Exception e) {
            logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    /** Returns the session container tracking per-partition session tokens. */
    public Object getSession() {
        return this.sessionContainer;
    }
    /** Replaces the session container; the argument must be a {@link SessionContainer}. */
    public void setSession(Object sessionContainer) {
        this.sessionContainer = (SessionContainer) sessionContainer;
    }
    /** Returns the collection metadata cache. */
    @Override
    public RxClientCollectionCache getCollectionCache() {
        return this.collectionCache;
    }
    /** Returns the partition-key-range metadata cache. */
    @Override
    public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
        return partitionKeyRangeCache;
    }
    /** Returns the manager that tracks regional endpoints and routing preferences. */
    @Override
    public GlobalEndpointManager getGlobalEndpointManager() {
        return this.globalEndpointManager;
    }
    /** Builds a fresh address selector bound to this client's address resolver and protocol. */
    @Override
    public AddressSelector getAddressSelector() {
        return new AddressSelector(this.addressResolver, this.configs.getProtocol());
    }
    /**
     * Reads the database account metadata from a specific regional endpoint (bypassing normal
     * endpoint selection). As a side effect, refreshes {@code useMultipleWriteLocations} from
     * the returned account properties combined with the client's connection policy.
     */
    public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
        // Deferred so each subscription builds (and signs) a fresh request.
        return Flux.defer(() -> {
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
            return this.populateHeadersAsync(request, RequestVerb.GET)
                .flatMap(requestPopulated -> {
                    // Force the request to the caller-supplied endpoint.
                    requestPopulated.setEndpointOverride(endpoint);
                    return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                        String message = String.format("Failed to retrieve database account information. %s",
                            e.getCause() != null
                                ? e.getCause().toString()
                                : e.toString());
                        logger.warn(message);
                    }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                        .doOnNext(databaseAccount ->
                            this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
                });
        });
    }
    /**
     * Certain requests must be routed through gateway even when the client connectivity mode is direct.
     *
     * @param request the request being routed
     * @return the store model to execute the request against (gateway proxy or direct store model)
     */
    private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
        // Explicit per-request override always wins.
        if (request.useGatewayMode) {
            return this.gatewayProxy;
        }
        ResourceType resourceType = request.getResourceType();
        OperationType operationType = request.getOperationType();
        // Resource types that are always gateway-only (metadata/control-plane style resources).
        // Note: scripts go direct only for ExecuteJavaScript.
        if (resourceType == ResourceType.Offer ||
            resourceType == ResourceType.ClientEncryptionKey ||
            resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
            resourceType == ResourceType.PartitionKeyRange ||
            resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            return this.gatewayProxy;
        }
        if (operationType == OperationType.Create
            || operationType == OperationType.Upsert) {
            // Creating top-level/metadata resources goes through gateway; data-plane writes go direct.
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection ||
                resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Delete) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Replace) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Read) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else {
            // Queries/read-feeds against collection children without a resolved partition key
            // (no PK range identity and no PK header) need gateway to fan out/route.
            if ((operationType == OperationType.Query ||
                operationType == OperationType.SqlQuery ||
                operationType == OperationType.ReadFeed) &&
                Utils.isCollectionChild(request.getResourceType())) {
                if (request.getPartitionKeyRangeIdentity() == null &&
                    request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                    return this.gatewayProxy;
                }
            }
            return this.storeModel;
        }
    }
    /**
     * Shuts the client down exactly once: releases the endpoint manager, store client factory,
     * HTTP client, CPU monitor registration and (if enabled) the throughput control store.
     * Subsequent calls only log a warning.
     */
    @Override
    public void close() {
        logger.info("Attempting to close client {}", this.clientId);
        // getAndSet guarantees the teardown below runs at most once even under concurrent calls.
        if (!closed.getAndSet(true)) {
            activeClientsCnt.decrementAndGet();
            logger.info("Shutting down ...");
            logger.info("Closing Global Endpoint Manager ...");
            LifeCycleUtils.closeQuietly(this.globalEndpointManager);
            logger.info("Closing StoreClientFactory ...");
            LifeCycleUtils.closeQuietly(this.storeClientFactory);
            logger.info("Shutting down reactorHttpClient ...");
            LifeCycleUtils.closeQuietly(this.reactorHttpClient);
            logger.info("Shutting down CpuMonitor ...");
            CpuMemoryMonitor.unregister(this);
            if (this.throughputControlEnabled.get()) {
                logger.info("Closing ThroughputControlStore ...");
                this.throughputControlStore.close();
            }
            logger.info("Shutting down completed.");
        } else {
            logger.warn("Already shutdown!");
        }
    }
    /** Returns the deserializer used to materialize items from raw responses. */
    @Override
    public ItemDeserializer getItemDeserializer() {
        return this.itemDeserializer;
    }
    /**
     * Registers a throughput control group. On the first call this lazily creates the
     * {@link ThroughputControlStore} and wires it into the store model (direct mode) or
     * gateway proxy (gateway mode); the method is synchronized to make that one-time
     * initialization safe.
     */
    @Override
    public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
        checkNotNull(group, "Throughput control group can not be null");
        if (this.throughputControlEnabled.compareAndSet(false, true)) {
            this.throughputControlStore =
                new ThroughputControlStore(
                    this.collectionCache,
                    this.connectionPolicy.getConnectionMode(),
                    this.partitionKeyRangeCache);
            if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
                this.storeModel.enableThroughputControl(throughputControlStore);
            } else {
                this.gatewayProxy.enableThroughputControl(throughputControlStore);
            }
        }
        this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
    }
    /** Proactively opens connections and warms caches for the configured containers. */
    @Override
    public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
        return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
    }
    /** Returns the account's default consistency level as reported by the gateway. */
    @Override
    public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
        return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
    }
    /***
     * Configure fault injector provider.
     *
     * @param injectorProvider the fault injector provider.
     */
    @Override
    public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
        checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
        // In direct mode, inject faults into both the store model and address resolution;
        // the gateway proxy is always wired since some requests route through it regardless.
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
            this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
            this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
        }
        this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    /** Records completion of the proactive open-connections/init-caches flow (diagnostics hook). */
    @Override
    public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
        this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
    }
    /** Records the start of the proactive open-connections/init-caches flow (diagnostics hook). */
    @Override
    public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
        this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
    }
    /** Returns the master key or resource token this client authenticates with. */
    @Override
    public String getMasterKeyOrResourceToken() {
        return this.masterKeyOrResourceToken;
    }
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
PartitionKey partitionKey,
String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE");
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
String pkParamName = "@pkValue";
parameters.add(new SqlParameter(pkParamName, pkValue));
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
    /**
     * Returns the feed ranges (one per physical partition) of the collection, wrapped in a
     * retry policy that handles invalid-partition (split/merge) responses by refreshing the
     * collection cache and retrying.
     *
     * @param collectionLink the collection to enumerate ranges for
     * @param forceRefresh whether to bypass the cached partition key ranges
     */
    @Override
    public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            collectionLink,
            new HashMap<>());

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Query,
            ResourceType.Document,
            collectionLink,
            null);

        invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);

        return ObservableHelper.inlineIfPossibleAsObs(
            () -> getFeedRangesInternal(request, collectionLink, forceRefresh),
            invalidPartitionExceptionRetryPolicy);
    }
    // Resolves the collection, then fetches the overlapping partition key ranges for the
    // full key space and converts them into FeedRange instances.
    private Mono<List<FeedRange>> getFeedRangesInternal(
        RxDocumentServiceRequest request,
        String collectionLink,
        boolean forceRefresh) {

        logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
            request);

        return collectionObs.flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            // Query ranges over the entire partition key space.
            Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
                .tryGetOverlappingRangesAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
                    forceRefresh,
                    null);

            return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
        });
    }
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
if (partitionKeyRangeList == null) {
request.forceNameCacheRefresh = true;
throw new InvalidPartitionException();
}
List<FeedRange> feedRanges = new ArrayList<>();
partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
    /**
     * Creates a type 4 (pseudo randomly generated) UUID.
     * <p>
     * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
     *
     * @return A randomly generated {@link UUID}.
     */
    public static UUID randomUuid() {
        return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
    }

    // Forces the RFC 4122 version-4 / IETF-variant bits onto arbitrary random longs:
    static UUID randomUuid(long msb, long lsb) {
        msb &= 0xffffffffffff0fffL; // clear the 4 version bits
        msb |= 0x0000000000004000L; // set version to 4 (random)
        lsb &= 0x3fffffffffffffffL; // clear the 2 variant bits
        lsb |= 0x8000000000000000L; // set variant to IETF (10xx)
        return new UUID(msb, lsb);
    }
    // Convenience overload: uses this client itself as the diagnostics factory.
    private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
        ResourceType resourceType,
        OperationType operationType,
        DocumentPointOperation callback,
        RequestOptions initialRequestOptions,
        boolean idempotentWriteRetriesEnabled) {

        return wrapPointOperationWithAvailabilityStrategy(
            resourceType,
            operationType,
            callback,
            initialRequestOptions,
            idempotentWriteRetriesEnabled,
            this
        );
    }
    /**
     * Wraps a document point operation with the threshold-based availability strategy (hedging).
     * <p>
     * When at least two regions are applicable, the operation is started in the first region
     * immediately and speculatively re-issued in each subsequent region after a growing delay
     * (threshold + step * position). The first region to produce a non-transient result wins
     * ({@code Mono.firstWithValue}); diagnostics from all attempts are merged back into the
     * caller's request options. With fewer than two applicable regions the callback runs once,
     * unhedged.
     */
    private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
        ResourceType resourceType,
        OperationType operationType,
        DocumentPointOperation callback,
        RequestOptions initialRequestOptions,
        boolean idempotentWriteRetriesEnabled,
        DiagnosticsClientContext innerDiagnosticsFactory) {

        checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
        checkNotNull(operationType, "Argument 'operationType' must not be null.");
        checkNotNull(callback, "Argument 'callback' must not be null.");

        final RequestOptions nonNullRequestOptions =
            initialRequestOptions != null ? initialRequestOptions : new RequestOptions();

        checkArgument(
            resourceType == ResourceType.Document,
            "This method can only be used for document point operations.");

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);

        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            idempotentWriteRetriesEnabled,
            nonNullRequestOptions);

        // Fewer than two candidate regions: no hedging possible, run the plain operation.
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
        }

        ThresholdBasedAvailabilityStrategy availabilityStrategy =
            (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
        List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();

        // Scoped factory collects diagnostics from every speculative attempt for later merging.
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);

        orderedApplicableRegionsForSpeculation
            .forEach(region -> {
                RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);

                if (monoList.isEmpty()) {
                    // First attempt: allowed to reach all regions; any CosmosException is wrapped
                    // (not rethrown) so firstWithValue can pick a later winner instead.
                    Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                            .map(NonTransientPointOperationResult::new)
                            .onErrorResume(
                                RxDocumentClientImpl::isCosmosException,
                                t -> Mono.just(
                                    new NonTransientPointOperationResult(
                                        Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    if (logger.isDebugEnabled()) {
                        monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                            "STARTING to process {} operation in region '{}'",
                            operationType,
                            region)));
                    } else {
                        monoList.add(initialMonoAcrossAllRegions);
                    }
                } else {
                    // Hedged attempt: pin it to this region by excluding every other candidate.
                    clonedOptions.setExcludeRegions(
                        getEffectiveExcludedRegionsForHedging(
                            nonNullRequestOptions.getExcludeRegions(),
                            orderedApplicableRegionsForSpeculation,
                            region)
                    );

                    // Non-transient errors resolve the race; transient ones keep it pending.
                    Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                            .map(NonTransientPointOperationResult::new)
                            .onErrorResume(
                                RxDocumentClientImpl::isNonTransientCosmosException,
                                t -> Mono.just(
                                    new NonTransientPointOperationResult(
                                        Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    // Delay grows with the attempt's position: threshold + step * (index - 1).
                    Duration delayForCrossRegionalRetry = (availabilityStrategy)
                        .getThreshold()
                        .plus((availabilityStrategy)
                            .getThresholdStep()
                            .multipliedBy(monoList.size() - 1));

                    if (logger.isDebugEnabled()) {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                                .delaySubscription(delayForCrossRegionalRetry));
                    } else {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .delaySubscription(delayForCrossRegionalRetry));
                    }
                }
            });

        // NOTE: cancellation when the first Mono emits a value is a desired behavior here -
        // the later hedged attempts are abandoned once a winner exists.
        return Mono
            .firstWithValue(monoList)
            .flatMap(nonTransientResult -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                if (nonTransientResult.isError()) {
                    return Mono.error(nonTransientResult.exception);
                }

                return Mono.just(nonTransientResult.response);
            })
            .onErrorMap(throwable -> {
                // firstWithValue signals NoSuchElementException when no source emitted a value;
                // dig out the first real CosmosException from the composite cause.
                Throwable exception = Exceptions.unwrap(throwable);

                if (exception instanceof NoSuchElementException) {
                    List<Throwable> innerThrowables = Exceptions
                        .unwrapMultiple(exception.getCause());

                    int index = 0;
                    for (Throwable innerThrowable : innerThrowables) {
                        Throwable innerException = Exceptions.unwrap(innerThrowable);

                        if (innerException instanceof CosmosException) {
                            CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                            diagnosticsFactory.merge(nonNullRequestOptions);
                            return cosmosException;
                        } else if (innerException instanceof NoSuchElementException) {
                            logger.trace(
                                "Operation in {} completed with empty result because it was cancelled.",
                                orderedApplicableRegionsForSpeculation.get(index));
                        } else if (logger.isWarnEnabled()) {
                            String message = "Unexpected Non-CosmosException when processing operation in '"
                                + orderedApplicableRegionsForSpeculation.get(index)
                                + "'.";
                            logger.warn(
                                message,
                                innerException
                            );
                        }

                        index++;
                    }
                }

                diagnosticsFactory.merge(nonNullRequestOptions);

                return exception;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    }
private static boolean isCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
return unwrappedException instanceof CosmosException;
}
private static boolean isNonTransientCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
if (!(unwrappedException instanceof CosmosException)) {
return false;
}
CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
return isNonTransientResultForHedging(
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode());
}
private List<String> getEffectiveExcludedRegionsForHedging(
List<String> initialExcludedRegions,
List<String> applicableRegions,
String currentRegion) {
List<String> effectiveExcludedRegions = new ArrayList<>();
if (initialExcludedRegions != null) {
effectiveExcludedRegions.addAll(initialExcludedRegions);
}
for (String applicableRegion: applicableRegions) {
if (!applicableRegion.equals(currentRegion)) {
effectiveExcludedRegions.add(applicableRegion);
}
}
return effectiveExcludedRegions;
}
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
return true;
}
if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT &&
subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
return true;
}
if (statusCode == HttpConstants.StatusCodes.BADREQUEST
|| statusCode == HttpConstants.StatusCodes.CONFLICT
|| statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
|| statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
|| statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
|| statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) {
return true;
}
if (statusCode == HttpConstants.StatusCodes.NOTFOUND
&& subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return true;
}
return false;
}
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
if (clientContextOverride != null) {
return clientContextOverride;
}
return this;
}
/**
* Returns the applicable endpoints ordered by preference list if any
* @param operationType - the operationT
* @return the applicable endpoints ordered by preference list if any
*/
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
if (orderedEffectiveEndpointsList == null) {
return EMPTY_ENDPOINT_LIST;
}
int i = 0;
while (i < orderedEffectiveEndpointsList.size()) {
if (orderedEffectiveEndpointsList.get(i) == null) {
orderedEffectiveEndpointsList.remove(i);
} else {
i++;
}
}
return orderedEffectiveEndpointsList;
}
    // Convenience overload: unwraps the excluded regions from the request options.
    private List<String> getApplicableRegionsForSpeculation(
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        ResourceType resourceType,
        OperationType operationType,
        boolean isIdempotentWriteRetriesEnabled,
        RequestOptions options) {

        return getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            isIdempotentWriteRetriesEnabled,
            options.getExcludeRegions());
    }
    /**
     * Determines the ordered list of regions eligible for speculative (hedged) execution.
     * Returns the shared empty list whenever hedging is not applicable: policy disabled,
     * non-document resource, writes without idempotent-retry opt-in, writes without
     * multi-write support, or a non-threshold-based availability strategy. Otherwise maps
     * the applicable endpoints to region names, filtering the caller's exclusions
     * (case-insensitively).
     */
    private List<String> getApplicableRegionsForSpeculation(
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        ResourceType resourceType,
        OperationType operationType,
        boolean isIdempotentWriteRetriesEnabled,
        List<String> excludedRegions) {

        if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
            return EMPTY_REGION_LIST;
        }

        if (resourceType != ResourceType.Document) {
            return EMPTY_REGION_LIST;
        }

        if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
            return EMPTY_REGION_LIST;
        }

        if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
            return EMPTY_REGION_LIST;
        }

        if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
            return EMPTY_REGION_LIST;
        }

        List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);

        HashSet<String> normalizedExcludedRegions = new HashSet<>();
        if (excludedRegions != null) {
            excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
        }

        List<String> orderedRegionsForSpeculation = new ArrayList<>();
        endpoints.forEach(uri -> {
            // NOTE(review): assumes getRegionName never returns null for an applicable
            // endpoint - a null here would NPE on toLowerCase; confirm against
            // GlobalEndpointManager's contract.
            String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
            if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
                orderedRegionsForSpeculation.add(regionName);
            }
        });

        return orderedRegionsForSpeculation;
    }
// Executes a feed/query operation with the cross-region "hedging" availability strategy:
// the operation starts in the primary applicable region immediately and, after a
// threshold-based delay, is speculatively re-issued against additional applicable regions.
// The first attempt to produce a value (or a non-transient error) wins the race.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
// This path is only wired up for document (feed) operations.
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
// isIdempotentWriteRetriesEnabled is passed as false - feed operations are reads.
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
// Hedging needs at least two candidate regions; otherwise run the operation unmodified.
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
// Each attempt operates on its own clone so request mutations don't leak across hedges.
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// Primary attempt: subscribes immediately; any CosmosException is wrapped as a
// non-transient result so it can terminate the race with that error.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pin the clone to this region by excluding the other applicable regions.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
// For hedges only NON-transient CosmosExceptions are materialized as results;
// transient errors simply let the other attempts win.
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Delay for the n-th hedge: threshold + (n - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// Race all attempts; the first emitted value wins and the remaining sources are cancelled.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// Mono.firstWithValue signals NoSuchElementException when no source emitted a value;
// dig out the first real CosmosException from the per-region failures instead.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
// NOTE(review): assumes the suppressed inner errors are ordered like
// orderedApplicableRegionsForSpeculation - confirm against Reactor's contract.
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Abstraction over a single document point operation (create/read/replace/patch/delete)
// so availability-strategy plumbing can invoke it uniformly with per-attempt options,
// an effective end-to-end latency policy, and an optional diagnostics-context override.
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Terminal outcome of a point operation used in availability-strategy races.
 * Exactly one of {@code response} or {@code exception} is non-null, depending on
 * which constructor was used.
 */
private static class NonTransientPointOperationResult {
    private final CosmosException exception;
    private final ResourceResponse<Document> response;

    /** Wraps a non-transient failure. */
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Wraps a successful response. */
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** @return the successful response, or null when this result is an error. */
    public ResourceResponse<Document> getResponse() {
        return this.response;
    }

    /** @return the failure, or null when this result is a success. */
    public CosmosException getException() {
        return this.exception;
    }

    /** @return true when this result carries an exception instead of a response. */
    public boolean isError() {
        return this.exception != null;
    }
}
/**
 * Terminal outcome of a feed operation used in availability-strategy races.
 * Exactly one of {@code response} or {@code exception} is non-null, depending on
 * which constructor was used.
 *
 * @param <T> the feed operation's response type.
 */
private static class NonTransientFeedOperationResult<T> {
    private final CosmosException exception;
    private final T response;

    /** Wraps a non-transient failure. */
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Wraps a successful response. */
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** @return the successful response, or null when this result is an error. */
    public T getResponse() {
        return this.response;
    }

    /** @return the failure, or null when this result is a success. */
    public CosmosException getException() {
        return this.exception;
    }

    /** @return true when this result carries an exception instead of a response. */
    public boolean isError() {
        return this.exception != null;
    }
}
// DiagnosticsClientContext decorator that records every CosmosDiagnostics it creates so
// that, once the owning operation completes, all of them can be merged into a single
// CosmosDiagnosticsContext via merge(...). Scopes diagnostics to one logical operation.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
// Guards against merging the captured diagnostics more than once (see merge()).
private final AtomicBoolean isMerged = new AtomicBoolean(false);
private final DiagnosticsClientContext inner;
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
// When true, feed diagnostics get force-marked as captured in the paged flux during merge().
private final boolean shouldCaptureAllFeedDiagnostics;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
// Delegates creation, but remembers the instance so merge() can attach it to a context later.
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return this.mostRecentlyCreatedDiagnostics.get();
}
// Convenience overload - uses the diagnostics-context snapshot from the request options, if any.
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
// Attaches all captured, not-yet-assigned, non-empty diagnostics to a single context.
// Idempotent: only the first call has an effect (enforced via compareAndSet).
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
// No context supplied - fall back to the first captured diagnostics that already has one.
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
if (ctx == null) {
// Nothing to merge into - captured diagnostics stay unattached.
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
// Clears captured diagnostics and re-arms the factory for another merge cycle.
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} |
When client telemetry is enabled, this log may never get logged: `sendClientTelemetry` will repeatedly call itself, so the `thenEmpty` callback may never be invoked (I think). | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
// Allow tests/infrastructure to wrap or replace the HTTP client before it is used.
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Warm-start the collection cache from a serialized snapshot when one is provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
// NOTE(review): the format string below declares 5 '{}' placeholders but 6 arguments
// are passed - the trailing configs.getProtocol() is silently ignored by SLF4J.
// NOTE(review): when client telemetry is enabled, clientTelemetry.init() may never
// complete, so this thenEmpty callback may never run - TODO confirm.
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel,
configs.getProtocol());
}).subscribe();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// Session capturing is only needed for SESSION consistency unless explicitly overridden.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Initialization must not leak half-constructed clients - close, then rethrow.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | logger.info( | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
// Allow tests/infrastructure to wrap or replace the HTTP client before it is used.
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Warm-start the collection cache from a serialized snapshot when one is provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
// Log once telemetry initialization completes; 5 placeholders, 5 arguments.
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel);
}).subscribe();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// Session capturing is only needed for SESSION consistency unless explicitly overridden.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Initialization must not leak half-constructed clients - close, then rethrow.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
// Returns the per-client diagnostics configuration assembled during construction.
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
// Creates a new CosmosDiagnostics honoring the configured telemetry sampling rate and
// remembers it as the most recently created instance for this client.
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics =
diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
this.mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
// Creates the gateway configuration reader and fails fast (with a pointer to troubleshooting
// docs) when the database account could not be fetched during endpoint-manager init.
// NOTE(review): the "More info: https:" string literals below appear truncated in this copy
// (likely everything after "//" was stripped by extraction) - restore the full URLs from VCS.
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
// Surface the underlying refresh error (if any) both in the log and as the failure cause.
Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
if (databaseRefreshErrorSnapshot != null) {
logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot
);
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot);
} else {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
}
}
// Multi-write is effective only when both the client policy and the account allow it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Pushes the freshly initialized caches and account configuration into the gateway store model;
// must run after initializeGatewayConfigurationReader() and cache construction.
private void updateGatewayProxy() {
(this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
(this.gatewayProxy).setCollectionCache(this.collectionCache);
(this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
(this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes this client's collection cache into the given metadata-caches snapshot,
// allowing another client instance to be warmed up from it.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
// Wires up direct (TCP) connectivity: builds the global address resolver, the store
// client factory, and finally the server store model used for direct-mode requests.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
// proxy is not used for direct-mode address resolution here
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
// Exposes this client as a DatabaseAccountManagerInternal by delegating the three
// operations (service endpoint, account fetch, connection policy) back to this instance.
// A new adapter object is created on every call.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model (HTTP data plane). Package-private so tests can
// override the construction.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
// Builds the gateway HTTP client from the connection policy (pool size, idle timeout,
// proxy, request timeout). When connection sharing across clients is enabled, a shared
// singleton instance is returned instead of a per-client one; otherwise the client
// config is recorded in the diagnostics config for troubleshooting.
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
.withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
.withProxy(this.connectionPolicy.getProxy())
.withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
} else {
diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
return HttpClient.createFixed(httpClientConfig);
}
}
// Creates the direct-mode (server) store model from the store client factory.
// NOTE(review): the 'subscribeRntbdStatus' parameter is unused in this body — either
// dead weight or its use was removed elsewhere; removing it would break callers, so it
// is only flagged here. Verify against the original source.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
// Simple accessors over client-level state. All are read-only.
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
@Override
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
// Returns the machine id from the diagnostics client config, or null when no
// diagnostics config is present.
@Override
public String getMachineId() {
if (this.diagnosticsClientConfig == null) {
return null;
}
return ClientTelemetry.getMachineId(diagnosticsClientConfig);
}
@Override
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
// Returns the diagnostics instance most recently produced by createDiagnostics();
// may be null if none has been created yet.
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return mostRecentlyCreatedDiagnostics.get();
}
// Creates a database, wrapping the operation in a session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Database request. Serialization of the database body is
// timed and recorded in the request's serialization diagnostics.
// @throws IllegalArgumentException (as a Mono error) when database is null.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Measure serialization cost separately so it shows up in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// Argument/serialization failures surface as a Mono error rather than a throw.
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes a database, wrapping the operation in a session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete-Database request.
// @throws IllegalArgumentException (as a Mono error) when databaseLink is empty.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads a single database, wrapping the operation in a session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read-Database request.
// @throws IllegalArgumentException (as a Mono error) when databaseLink is empty.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Enumerates all databases in the account as a paged feed.
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link plus a resource type to the feed/query link for that
// resource type (e.g. a database link + DocumentCollection -> its collections feed).
// Database and Offer are account-rooted and ignore the parent link.
// @throws NullPointerException when resourceTypeEnum is null (matches prior switch behavior).
// @throws IllegalArgumentException for resource types with no query link.
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    if (resourceTypeEnum == null) {
        // A switch over a null enum would have thrown NPE; keep that contract.
        throw new NullPointerException("resourceTypeEnum");
    }
    if (resourceTypeEnum == ResourceType.Database) {
        return Paths.DATABASES_ROOT;
    }
    if (resourceTypeEnum == ResourceType.Offer) {
        return Paths.OFFERS_ROOT;
    }
    if (resourceTypeEnum == ResourceType.DocumentCollection) {
        return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Document) {
        return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.User) {
        return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.ClientEncryptionKey) {
        return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Permission) {
        return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Attachment) {
        return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.StoredProcedure) {
        return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Trigger) {
        return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.UserDefinedFunction) {
        return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Conflict) {
        return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
    }
    throw new IllegalArgumentException("resource type not supported");
}
// Extracts the operation context/listener tuple from query request options;
// returns null when no options were provided.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
// Extracts the operation context/listener tuple from point-operation request options;
// returns null when no options were provided.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: runs a query using this client as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Core query entry point: resolves the feed link for the resource type, picks the
// correlation activity id (caller-supplied or random), wraps execution in an
// InvalidPartitionException retry policy, and scopes diagnostics so that partial
// diagnostics are merged into the operation state on every terminal signal
// (next, error, cancel).
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Prefer an explicitly supplied correlation activity id; otherwise generate one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
// Flag shared with the timeout machinery so cancelled queries can be identified later.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
// Scoped factory lets the operation state reset/merge per-attempt diagnostics.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
// Merge diagnostics on every emitted page, on error, and on cancellation.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Builds the document-query execution context (pipelined when needed) and executes it.
// Query-info metadata is attached to responses (SELECT VALUE handling on every page;
// query-plan diagnostics only on the first page). When an end-to-end latency policy is
// enabled, the response flux is additionally wrapped with a timeout.
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
// Tracks whether the current page is the first response of the query.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Attach query-plan diagnostics exactly once (first page only).
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsClientContext);
}
return feedResponseFlux;
// Concurrency hints: small buffer, prefetch of 1 to limit eager page fetching.
}, Queues.SMALL_BUFFER_SIZE, 1);
}
// Attaches diagnostics to a query exception: prefers the most recently created
// diagnostics; otherwise folds all cancelled-request diagnostics into a single
// aggregated instance by merging client-side request statistics pairwise.
// No-op when neither source of diagnostics is available.
private static void applyExceptionToMergedDiagnosticsForQuery(
CosmosQueryRequestOptions requestOptions,
CosmosException exception,
DiagnosticsClientContext diagnosticsClientContext) {
    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();
    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);
        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregatedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);
                        // BUGFIX: previously this read the statistics from 'first' a
                        // second time, so each reduction step merged a diagnostics
                        // instance with itself and the statistics of 'toBeMerged'
                        // were silently dropped.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);
                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    // Safe: the list was checked non-empty, so reduce yields a value.
                    .get();
            BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
        }
    }
}
// Wraps a query response flux with the end-to-end operation timeout. On timeout the
// reactor TimeoutException is mapped to a Cosmos cancellation exception carrying the
// merged diagnostics, and the shared cancellation flag is set so downstream execution
// contexts can observe it.
// NOTE(review): the negative-timeout branch still applies .timeout(endToEndTimeout)
// with the negative duration and maps to a dedicated negative-timeout exception —
// presumably an immediate-cancellation path used by tests; confirm intent upstream.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout,
DiagnosticsClientContext diagnosticsClientContext) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
// Preserve the original stack for debuggability.
cancellationException.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(
requestOptions, cancellationException, diagnosticsClientContext);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
return exception;
}
return throwable;
});
}
// Queries databases from a raw query string (delegates to the SqlQuerySpec overload).
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
return queryDatabases(new SqlQuerySpec(query), state);
}
// Queries databases at the account root.
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
// Creates a collection under the given database, with session-token-reset retries.
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Collection request; on success the response's session
// token is stored in the session container for the new collection.
// @throws IllegalArgumentException (as a Mono error) on empty databaseLink or null collection.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Serialization is timed and recorded in the request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Capture the session token so subsequent session reads see this write.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Replaces a collection definition, with session-token-reset retries.
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Replace-Collection request; the session token of a non-null
// response resource is stored in the session container.
// @throws IllegalArgumentException (as a Mono error) when collection is null.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Serialization is timed and recorded in the request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Unlike create, the resource may be absent (e.g. minimal response).
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes a collection, with session-token-reset retries.
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete-Collection request.
// @throws IllegalArgumentException (as a Mono error) when collectionLink is empty.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Low-level DELETE: populates headers, stamps the retry context end time for retried
// attempts, and dispatches through the appropriate store proxy.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.DELETE)
.flatMap(requestPopulated -> {
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
// Low-level "delete all items by partition key" — issued as a POST per the service
// protocol, otherwise mirrors delete() above.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
// Low-level GET for point reads; stamps the retry context end time for retried attempts.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
return populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated);
});
}
// Low-level GET for feed reads (no retry-context bookkeeping).
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
// Low-level POST for queries; captures the response's session token before returning.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated ->
this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
.map(response -> {
this.captureSessionToken(requestPopulated, response);
return response;
}
));
}
// Reads a single collection, with session-token-reset retries.
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read-Collection request.
// @throws IllegalArgumentException (as a Mono error) when collectionLink is empty.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Enumerates all collections under a database as a paged feed.
// @throws IllegalArgumentException when databaseLink is empty (thrown synchronously).
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
// Queries collections from a raw query string.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
QueryFeedOperationState state) {
return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Queries collections from a parameterized query spec.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Serializes stored-procedure parameters into a JSON array literal (e.g. ["a",1,{"k":2}]).
// JsonSerializable instances use the model bridge serializer; everything else goes
// through the shared Jackson mapper.
// @throws IllegalArgumentException when a parameter cannot be serialized to JSON.
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            json.append(',');
        }
        Object object = objectArray.get(i);
        if (object instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                json.append(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the per-request HTTP headers from client-level defaults plus the given
// RequestOptions. Ordering matters: client defaults first, then custom headers,
// then option-specific headers (which may overwrite earlier entries).
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
// Client-level defaults.
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
// No options: only apply the client-wide "no content on write" preference.
if (options == null) {
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
// Caller-supplied custom headers are applied before option-derived ones.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request content-response-on-write overrides the client default.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
// Optimistic-concurrency / consistency / indexing options.
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if (options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
// Pre/post trigger lists are joined into comma-separated header values.
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Throughput: an explicit non-negative offer throughput wins over an offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties path: manual (fixed) and autoscale offers are mutually exclusive.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
// Diagnostics / gateway-cache knobs.
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
if (options.getDedicatedGatewayRequestOptions() != null) {
if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
}
}
return headers;
}
// Exposes the retry-policy factory that resets stale session tokens on retry.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection (its partition key definition drives the header
 * value) and then stamps the partition key information onto the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    MetadataDiagnosticsContext metadataCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    return this.collectionCache
        .resolveCollectionAsync(metadataCtx, request)
        .map(holder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
            return request;
        });
}
/**
 * Variant that accepts an already in-flight collection resolution; once the
 * collection arrives, the partition key information is stamped onto the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
// Computes the effective partition key for the request and writes it into both
// the request object and the x-ms-documentdb-partitionkey header.
// Precedence (order of the branches below is significant):
//   1. explicit PartitionKey.NONE in options -> collection's "none" key
//   2. explicit partition key in options
//   3. collection has no partition key definition -> empty key
//   4. extract the key from the document payload itself
//   5. otherwise the caller must have supplied one -> error
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collections without a partition key definition use the implicit empty key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
// Materialize the document so the key can be extracted from its payload.
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// Rewind: the buffer may already have been consumed by serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Time the extraction so it shows up in serialization diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
// Header value must be ASCII-safe; non-ASCII characters are escaped.
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the RxDocumentServiceRequest for a document Create/Upsert: serializes the
// payload (recording serialization diagnostics), creates the request with the
// computed headers, wires E2E-timeout and excluded-regions context, and finally
// resolves the collection to stamp the partition key. Returned Mono completes
// with the fully prepared request.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Serialize the payload, timing it for the serialization diagnostics below.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
// Hook lets the E2E-timeout machinery mark this request as cancelled on timeout.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection and stamp partition key information asynchronously.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the RxDocumentServiceRequest for a transactional batch: wraps the
 * pre-serialized batch body, records serialization diagnostics, wires
 * E2E-timeout/excluded-regions context, and resolves the collection to attach
 * batch headers (partition key or partition key range id).
 *
 * Fix: the original set {@code request.requestContext.setExcludeRegions(...)} a
 * second time in a redundant duplicate {@code if (options != null)} block after
 * the serialization diagnostics; the duplicate has been removed (the value was
 * already applied above, alongside the E2E-timeout hook).
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; just wrap it, timing the copy for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Hook lets the E2E-timeout machinery mark this request as cancelled on timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection and attach the batch-specific headers.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches batch-routing headers to the request. Single-partition-key batches
 * carry an explicit partition key header; partition-key-range batches target a
 * specific range id. Common batch flags (atomicity, continue-on-error, item
 * count) are applied in both cases.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal = partitionKey.equals(PartitionKey.NONE)
            ? ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey())
            : BridgeInternal.getPartitionKeyInternal(partitionKey);
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String rangeId = ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(rangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
* NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
* @param request request to populate headers to
* @param httpMethod http method
* @return Mono, which on subscription will populate the headers in the request passed in the argument.
*/
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
// Every request carries a fresh RFC1123 date; it participates in key-based auth signatures.
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The token is URL-encoded before being placed in the header.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
// UTF-8 is guaranteed by the JVM; reaching here indicates a broken runtime.
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default content types: JSON for POST/PUT, JSON-patch for PATCH — unless already set.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range requests need extra routing headers resolved asynchronously first;
// AAD authorization (if applicable) is applied afterwards in both paths.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities, unless the caller already set the header.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Whether this request needs feed-range routing headers: only document/conflict
 * feed-or-query operations that actually carry a feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD auth needs an async token fetch here; all other schemes were
    // already applied when the headers were first populated.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Non-AAD schemes require no work on raw HttpHeaders; pass through unchanged.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
// Returns the authorization scheme (master key, resource token, AAD, ...) this client uses.
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
// Produces the authorization token for a request. Source precedence:
//   1. a user-supplied token resolver,
//   2. an AzureKeyCredential-backed signature,
//   3. a single resource token used verbatim,
//   4. the per-resource token map (with a special case for DatabaseAccount reads).
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Properties are exposed read-only to the user-supplied resolver.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is sent as-is.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account metadata reads use the first token observed in the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
// Maps a service ResourceType onto the public CosmosResourceType, falling back
// to SYSTEM for service-internal types with no public equivalent.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the response's session token so subsequent session-consistent reads
// can observe this operation's writes.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Executes a create (POST) through the store proxy after populating headers.
 * On a retry attempt the retry context's end time is refreshed for diagnostics.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel storeProxy = this.getStoreProxy(populated);
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(populated, operationContextAndListenerTuple);
        });
}
/**
 * Executes an upsert: a POST flagged with the IS_UPSERT header. The response's
 * session token is captured so later session reads see this write.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated)
                .processMessage(populated, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
// Executes a replace (PUT) through the store proxy, refreshing retry timing on retries.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
// Executes a partial-document update (PATCH), refreshing retry timing on retries.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated).processMessage(populated);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Route through the availability strategy so cross-region hedging can apply;
    // the flag tells the strategy whether hedged writes are safe to retry.
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Core create path: builds the retry policy chain (adding partition-key-mismatch
// handling when no explicit PK was supplied), scopes diagnostics, and wraps the
// operation with the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> createDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
// Without an explicit PK the extracted key may target a stale collection; this
// wrapper retries on partition-key-mismatch after refreshing the collection cache.
if (nonNullRequestOptions.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(() ->
createDocumentInternal(
collectionLink,
document,
nonNullRequestOptions,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
scopedDiagnosticsFactory),
requestRetryPolicy),
scopedDiagnosticsFactory
);
}
/**
 * Builds and dispatches the create request, converting the raw service response
 * into a typed ResourceResponse. Synchronous failures during request
 * construction are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride)
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Wraps a point-operation Mono with the end-to-end timeout policy (when enabled):
// a negative configured timeout fails immediately; otherwise a reactor timeout is
// applied and TimeoutExceptions are mapped to OperationCancelledException with
// diagnostics attached.
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
RequestOptions requestOptions,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono,
ScopedDiagnosticsFactory scopedDiagnosticsFactory) {
requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
// Ensure at least one diagnostics instance exists before failing fast.
CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
if (latestCosmosDiagnosticsSnapshot == null) {
scopedDiagnosticsFactory.createDiagnostics();
}
return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
}
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationExceptionForPointOperations(
scopedDiagnosticsFactory,
throwable,
requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
}
return rxDocumentServiceResponseMono;
}
// Maps a reactor TimeoutException (raised by Mono.timeout) to an
// OperationCancelledException carrying diagnostics; any other throwable is
// passed through unchanged. Also fires the "mark request cancelled" hook so the
// in-flight request context records the timeout.
private static Throwable getCancellationExceptionForPointOperations(
ScopedDiagnosticsFactory scopedDiagnosticsFactory,
Throwable throwable,
AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
// Reactor may wrap the timeout; unwrap before type-testing.
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (unwrappedException instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
// Preserve the original stack for debuggability.
exception.setStackTrace(throwable.getStackTrace());
Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
if (actualCallback != null) {
logger.trace("Calling actual Mark E2E timeout callback");
actualCallback.run();
}
// Attach the most recent diagnostics, creating one if none exists yet.
CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
if (lastDiagnosticsSnapshot == null) {
scopedDiagnosticsFactory.createDiagnostics();
}
BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
return exception;
}
return throwable;
}
/**
 * Builds the OperationCancelledException used when a caller configured a
 * negative end-to-end timeout, tagging it with the dedicated sub-status code
 * and any available diagnostics.
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    CosmosException exception = new OperationCancelledException(
        String.format("Negative timeout '%s' provided.", negativeTimeout), null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }
    return exception;
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    // Route through the availability strategy so cross-region hedging can apply;
    // the flag tells the strategy whether hedged writes are safe to retry.
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Core upsert path: mirrors createDocumentCore — retry chain with optional
// partition-key-mismatch handling, scoped diagnostics, E2E-timeout wrapping.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
// Without an explicit PK, retry on partition-key-mismatch after a cache refresh.
if (nonNullRequestOptions.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> upsertDocumentInternal(
collectionLink,
document,
nonNullRequestOptions,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
scopedDiagnosticsFactory),
finalRetryPolicyInstance),
scopedDiagnosticsFactory
);
}
/**
 * Builds and dispatches the upsert request, converting the raw service response
 * into a typed ResourceResponse. Synchronous failures during request
 * construction are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride)
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    // Route through the availability strategy so cross-region hedging can apply;
    // the flag tells the strategy whether hedged writes are safe to retry.
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink, document, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Core replace path: sets up retry policies and a scoped diagnostics factory,
// then executes the replace with the end-to-end operation timeout applied.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
String documentLink,
Object document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
// Without a partition key we may hit the wrong partition on a stale cache;
// wrap with the retry policy that refreshes the collection cache on PK mismatch.
if (nonNullRequestOptions.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
}
// Effectively-final copy so the lambda below can capture it.
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
documentLink,
document,
nonNullRequestOptions,
finalRequestRetryPolicy,
endToEndPolicyConfig,
scopedDiagnosticsFactory),
requestRetryPolicy),
scopedDiagnosticsFactory
);
}
/**
 * Validates arguments, converts the raw payload to a {@link Document} and
 * delegates to the link+Document replace overload.
 * Synchronous failures are surfaced as {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the exception as the last argument so the stack trace is logged,
        // consistent with upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its self link, routed through the
 * availability-strategy wrapper.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    final boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eCfg, ctxOverride) ->
            replaceDocumentCore(document, effectiveOptions, e2eCfg, ctxOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
// Core replace path for the Document-object overload (self-link based).
// NOTE(review): unlike the String-link overload, this path does not wrap the call
// with getPointOperationResponseMonoWithE2ETimeout and may pass null options into
// PartitionKeyMismatchRetryPolicy — confirm this asymmetry is intentional.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
Document document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
// No explicit partition key: add the retry policy that recovers from a stale
// collection cache causing a partition-key mismatch.
if (options == null || options.getPartitionKey() == null) {
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy for lambda capture.
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Validates the document and delegates the replace using the document's self link.
 * Synchronous failures are surfaced as {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed copy-paste: this is a document replace, not a database replace;
        // also log the exception itself so the stack trace is preserved.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Lowest-level replace: serializes the document, builds the wire request
// (headers, content, partition key), records serialization diagnostics and
// executes the Replace call.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Measure serialization separately so it shows up in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
// Stamp the tracking id into the payload so retries can be correlated.
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
// Hook lets the E2E timeout machinery mark this request as cancelled-on-timeout.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): `req` is ignored and the captured `request` is used —
// addPartitionKeyInformation presumably mutates/returns the same instance; confirm.
return requestObs
.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
// Resolves the end-to-end latency policy for a request: a per-request config
// (from the options) takes precedence; otherwise the client-level default applies.
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig = null;
    if (options != null) {
        requestLevelConfig = options.getCosmosEndToEndLatencyPolicyConfig();
    }
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
// Returns the given config when present, falling back to the client-wide default.
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig == null) {
        return this.cosmosEndToEndOperationLatencyPolicyConfig;
    }
    return policyConfig;
}
/**
 * Applies the given patch operations to the item at {@code documentLink},
 * routed through the availability-strategy wrapper.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    final boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, e2eCfg, ctxOverride) ->
            patchDocumentCore(documentLink, cosmosPatchOperations, effectiveOptions, e2eCfg, ctxOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
// Core patch path: sets up the session-token-reset retry policy and a scoped
// diagnostics factory, then executes the patch with the E2E timeout applied.
private Mono<ResourceResponse<Document>> patchDocumentCore(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> patchDocumentInternal(
documentLink,
cosmosPatchOperations,
nonNullRequestOptions,
documentClientRetryPolicy,
scopedDiagnosticsFactory),
documentClientRetryPolicy),
scopedDiagnosticsFactory
);
}
// Lowest-level patch: serializes the patch operations, builds the wire request,
// records serialization diagnostics and executes the Patch call.
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Measure serialization separately so it shows up in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
// NOTE(review): sibling internals pass getEffectiveClientContext(clientContextOverride)
// here; this one passes the override directly — confirm the difference is intentional.
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
// Hook lets the E2E timeout machinery mark this request as cancelled-on-timeout.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// No content/document passed: the partition key must come from options/headers.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
return requestObs
.flatMap(req -> patch(request, retryPolicyInstance))
.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes the document at {@code documentLink}, routed through the
 * availability-strategy wrapper.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eCfg, ctxOverride) ->
            deleteDocumentCore(documentLink, null, effectiveOptions, e2eCfg, ctxOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Deletes the document at {@code documentLink}; the supplied
 * {@code internalObjectNode} is used for partition-key resolution.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eCfg, ctxOverride) ->
            deleteDocumentCore(documentLink, internalObjectNode, effectiveOptions, e2eCfg, ctxOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
// Core delete path: sets up the session-token-reset retry policy and a scoped
// diagnostics factory, then executes the delete with the E2E timeout applied.
private Mono<ResourceResponse<Document>> deleteDocumentCore(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> deleteDocumentInternal(
documentLink,
internalObjectNode,
nonNullRequestOptions,
requestRetryPolicy,
scopedDiagnosticsFactory),
requestRetryPolicy),
scopedDiagnosticsFactory
);
}
// Lowest-level delete: builds the wire request, resolves partition key info
// (optionally from internalObjectNode) and executes the Delete call.
// Synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
// Hook lets the E2E timeout machinery mark this request as cancelled-on-timeout.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
return requestObs
.flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Deletes every document in the given logical partition of the collection.
 * Note: {@code partitionKey} is carried in {@code options}; the parameter is
 * part of the public signature but not used directly here.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Builds and executes the PartitionKey-delete request (server-side bulk delete of
// one logical partition). Synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
// Public read entry point; delegates to the overload that allows overriding the
// diagnostics factory, using this client itself as the default factory.
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
return readDocument(documentLink, options, this);
}
// Reads a single document through the availability-strategy wrapper.
// innerDiagnosticsFactory lets callers (e.g. readMany's point-read path) route
// diagnostics into their own aggregated context instead of this client's default.
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eCfg, ctxOverride) ->
            readDocumentCore(documentLink, effectiveOptions, e2eCfg, ctxOverride),
        options,
        false, // reads never opt into non-idempotent write retries
        innerDiagnosticsFactory
    );
}
// Core read path: sets up the session-token-reset retry policy and a scoped
// diagnostics factory, then executes the read with the E2E timeout applied.
private Mono<ResourceResponse<Document>> readDocumentCore(
String documentLink,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> readDocumentInternal(
documentLink,
nonNullRequestOptions,
retryPolicyInstance,
scopedDiagnosticsFactory),
retryPolicyInstance),
scopedDiagnosticsFactory
);
}
/**
 * Lowest-level read: builds the wire request, resolves partition key info and
 * executes the Read call. Synchronous failures are surfaced as {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Guard against null options before dereferencing, consistent with the
        // other *Internal methods (callers currently always pass non-null options,
        // so this is purely defensive).
        if (options != null) {
            // Hook lets the E2E timeout machinery mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req ->
            this.read(request, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in the collection by issuing an unfiltered query.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (collectionLink == null || collectionLink.isEmpty()) {
        throw new IllegalArgumentException("collectionLink");
    }
    // A read-feed is expressed as the trivial query over the collection.
    return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
// Reads a batch of items identified by (id, partition key) pairs in one logical
// operation. Items are grouped by physical partition: partitions holding exactly
// one requested item are served via point reads, all others via per-partition SQL
// queries; the partial results are merged into a single synthetic FeedResponse
// with aggregated diagnostics, metrics and request charge.
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
// All sub-operations report into one scoped factory so diagnostics can be
// merged into the caller's context at the end.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
// Placeholder request used only to resolve the collection metadata.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Routing map is needed to map each item's effective partition key to its range.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Group the requested identities by the partition key range that owns them.
itemIdentityList
.forEach(itemIdentity -> {
// Hierarchical (MULTI_HASH) keys must supply every path component.
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Only ranges with >1 item get a query; single-item ranges use point reads.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge both sources and fold every page into one synthetic response.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
// Record the whole readMany as one 200 operation in the diagnostics context.
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
// On failure, still record the operation (with the error's status codes)
// before propagating the original exception.
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
// For every partition range that must fetch more than one item, builds the SQL
// query spec reading those items. Ranges with a single item get no entry — they
// are served through the point-read path instead.
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        final List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges use point reads
        }
        final SqlQuerySpec spec;
        if ("[\"id\"]".equals(pkSelector)) {
            // Partition key path is /id: the id alone identifies each item.
            spec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            spec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(entry.getKey(), spec);
    }
    return queriesByRange;
}
// Builds "SELECT * FROM c WHERE c.id IN ( @param0, @param1, ... )" for the case
// where the partition key path is "/id" (id alone identifies each item).
// partitionKeySelector is unused here; it is kept for signature parity with the
// sibling query builders.
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final List<String> paramNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        final String paramName = "@param" + i;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(i).getId()));
        paramNames.add(paramName);
    }
    final String queryText =
        "SELECT * FROM c WHERE c.id IN ( " + String.join(", ", paramNames) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
// Builds a parameterized disjunction matching each requested (id, partition key)
// pair: "SELECT * FROM c WHERE ( (c.id = @p1 AND  c[...] = @p0 ) OR ... )".
// Parameter numbering (pk at 2*i, id at 2*i+1) and spacing mirror the original
// builder exactly.
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final List<String> disjuncts = new ArrayList<>();
    for (int i = 0; i < itemIdentities.size(); i++) {
        final CosmosItemIdentity identity = itemIdentities.get(i);
        final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        final String pkParamName = "@param" + (2 * i);
        final String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        // Note the double space after AND — preserved byte-for-byte from the
        // original appends (" AND " + " c").
        disjuncts.add(
            "(c.id = " + idParamName + " AND  c" + partitionKeySelector + " = " + pkParamName + " )");
    }
    return new SqlQuerySpec(
        "SELECT * FROM c WHERE ( " + String.join(" OR ", disjuncts) + " )",
        parameters);
}
// Builds the readMany query for hierarchical (MULTI_HASH) partition keys: each
// disjunct matches c.id plus every sub-partition-key path component.
// NOTE(review): the partition key value is assumed to be a '='-separated String
// with one segment per defined path (see split below) — confirm this encoding
// against the callers; a value containing '=' would split incorrectly.
private SqlQuerySpec createReadManyQuerySpecMultiHash(
List<CosmosItemIdentity> itemIdentities,
PartitionKeyDefinition partitionKeyDefinition) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
// Sequential counter: parameter names must stay unique across all items.
int paramCount = 0;
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkValueString = (String) pkValue;
// Each entry pairs a partition key path with its generated parameter name.
List<List<String>> partitionKeyParams = new ArrayList<>();
List<String> paths = partitionKeyDefinition.getPaths();
int pathCount = 0;
for (String subPartitionKey: pkValueString.split("=")) {
String pkParamName = "@param" + paramCount;
partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
parameters.add(new SqlParameter(pkParamName, subPartitionKey));
paramCount++;
pathCount++;
}
String idValue = itemIdentity.getId();
String idParamName = "@param" + paramCount;
paramCount++;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
for (List<String> pkParam: partitionKeyParams) {
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c.");
// substring(1) drops the leading '/' of the partition key path.
queryStringBuilder.append(pkParam.get(0).substring(1));
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParam.get(1));
}
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Builds the bracketed selector for the collection's partition key paths,
// e.g. paths ["/pk"] -> "[\"pk\"]" (leading '/' stripped, parts concatenated).
// NOTE(review): embedded double quotes in a path part are replaced with a single
// backslash ('"' -> '\'), not an escaped quote — looks suspicious; confirm this
// is the intended escaping before changing it.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
.map(pathPart -> StringUtils.substring(pathPart, 1))
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
// Executes the per-partition readMany queries (one SqlQuerySpec per range in
// rangeQueryMap) and returns the merged page stream. Returns an empty Flux when
// every requested range is served by point reads. When an end-to-end latency
// policy is enabled, the page stream is wrapped with the E2E timeout.
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
if (rangeQueryMap.isEmpty()) {
return Flux.empty();
}
// One activity id groups all per-partition executions of this readMany.
UUID activityId = randomUuid();
// Shared flag so the timeout wrapper can signal cancellation to the execution contexts.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
diagnosticsFactory,
queryClient,
collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum,
isQueryCancelledOnTimeout);
Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsFactory);
}
return feedResponseFlux;
}
// Executes the point-read portion of a readMany operation: partition-key
// ranges that map to exactly one item identity are served via readDocument
// instead of a cross-item query, and each outcome is converted into a
// single-item (or empty) FeedResponse carrying its diagnostics.
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
// Only single-identity lists are handled here; multi-identity ranges are
// expected to have been routed to the query path (queryForReadMany).
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
// NOTE(review): assumes resourceLink already ends with a path separator -
// confirm against callers.
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
// A plain 404 (sub-status UNKNOWN) just means the item does not exist:
// carry the exception forward so it becomes an empty page, not an error.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// Not-found: emit an empty page but preserve the request diagnostics.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
// Found: wrap the single item into a one-element feed page.
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec-based overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, spec, state, classOfT);
}
// Adapter exposing this client's caches, consistency settings and query
// execution to the query pipeline. NOTE(review): the 'rxDocumentClientImpl'
// parameter is unused - all members are accessed via RxDocumentClientImpl.this.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency, as reported by the gateway.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Consistency level this client instance was configured with.
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
// With an operation listener attached: tag the request with the correlated
// activity id and notify the listener on request, response, and error.
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
// Straight delegation to the enclosing client's availability-strategy logic.
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// NOTE(review): returns null - presumably unsupported/unused by the query
// pipeline reached through this adapter; confirm before relying on it.
return null;
}
};
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    // Log the query text, then hand off to the generic query pipeline for documents.
    SqlQuerySpecLogger queryLogger = SqlQuerySpecLogger.getInstance();
    queryLogger.logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    // Builds a change-feed query scoped to the given collection and runs it.
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<T>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    // Delegate to the options-based overload using the options captured in the state.
    CosmosChangeFeedRequestOptions changeFeedOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, changeFeedOptions, classOfT);
}
// Reads all documents of a single logical partition by issuing a
// partition-scoped scan query (WHERE pk = ...) against the one physical
// partition-key range owning that logical partition.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the caller's options so pk-range pinning below does not leak back.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
// Regions that may be speculatively contacted under the availability strategy.
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
// With fewer than two applicable regions there is no speculation, so the
// diagnostics factory never needs to be reset between attempts.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx));
} else {
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
// Resolve the collection first - its pk definition drives the query below.
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query filtering on the partition-key value.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries on "invalid partition" (e.g. after a split) by refreshing caches.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Map the logical partition key to the single physical range owning it,
// then pin the query to that range id.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
diagnosticsFactory,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Under speculation, fold the scoped diagnostics back into the request
// options on every outcome (next item, error, or cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    // Expose the shared query-plan cache held by this client instance.
    return this.queryPlanCache;
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    // Reads the partition-key-range feed of the collection (state-based overload).
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    // Reads the partition-key-range feed of the collection (options-based overload).
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    // Validates inputs and assembles a StoredProcedure request targeting the
    // collection's stored-procedures feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, feedPath, storedProcedure, headers, options);
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    // Validates inputs and assembles a UDF request targeting the collection's
    // user-defined-functions feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, feedPath, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Run the create under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Builds the Create request, arms the retry policy, and maps the raw
    // service response to a typed resource response.
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    // Run the replace under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the resource, issues a Replace against its self-link, and maps
    // the raw response to a typed resource response.
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Run the delete under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the link, issues a Delete against it, and maps the raw
    // response to a typed resource response.
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Run the read under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the link, issues a Read against it, and maps the raw response
    // to a typed resource response.
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    // Reads the stored-procedures feed of the collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec-based overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Hand off to the generic query pipeline for stored procedures.
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    // Run the execution under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Run the batch under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Executes a stored procedure (ExecuteJavaScript) with the given parameters,
// capturing the session token from the response so follow-up requests observe
// the procedure's writes.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Parameters are serialized into the request body; null/empty -> empty body.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
// Resolve and attach partition-key information before sending.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores its 'req' parameter and reuses the outer
// 'request' - presumably addPartitionKeyInformation mutates and returns the
// same instance; confirm before refactoring.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    // Builds the batch document request, sends it, and parses the service
    // response into a CosmosBatchResponse.
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        return getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration)
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    // Run the create under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Builds the Create request, arms the retry policy, and maps the raw
    // service response to a typed resource response.
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    // Validates inputs and assembles a Trigger request targeting the
    // collection's triggers feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, feedPath,
        trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Run the replace under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the resource, issues a Replace against its self-link, and maps
    // the raw response to a typed resource response.
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Run the delete under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the link, issues a Delete against it, and maps the raw
    // response to a typed resource response.
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        String resourcePath = Utils.joinPath(triggerLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Run the read under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the link, issues a Read against it, and maps the raw response
    // to a typed resource response.
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        String resourcePath = Utils.joinPath(triggerLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    // Reads the triggers feed of the collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec-based overload.
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // Hand off to the generic query pipeline for triggers.
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    // Run the create under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Builds the Create request, arms the retry policy, and maps the raw
    // service response to a typed resource response.
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    // Run the replace under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Validates the resource, issues a Replace against its self-link, and maps
    // the raw response to a typed resource response.
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the user defined function identified by the given link.
 *
 * @param udfLink link of the UDF to delete; must not be empty.
 * @param options request options, may be null.
 * @return a Mono emitting the service response for the deleted UDF.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of deleteUserDefinedFunction. Synchronous validation errors
// are surfaced as an error Mono rather than thrown to the caller.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user defined function identified by the given link.
 *
 * @param udfLink link of the UDF to read; must not be empty.
 * @param options request options, may be null.
 * @return a Mono emitting the service response containing the UDF.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readUserDefinedFunction: builds a Read request for the
// UDF link and maps the raw response into a typed ResourceResponse.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of user defined functions under the given collection.
 *
 * @param collectionLink parent collection link; must not be empty.
 * @param state          per-operation query state.
 * @return a Flux of feed response pages of UDFs.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedLink);
}
/**
 * Queries user defined functions with a raw SQL string; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
/**
 * Queries user defined functions under the given collection with a parameterized
 * query spec.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads the conflict resource identified by the given link.
 *
 * @param conflictLink link of the conflict to read; must not be empty.
 * @param options      request options, may be null.
 * @return a Mono emitting the service response containing the conflict.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of readConflict: builds a Read request, enriches it with
 * partition key information, then performs the read.
 *
 * Fix: inside the flatMap the lambda previously ignored the request emitted by
 * {@code addPartitionKeyInformation} ({@code req}) and reused the captured local
 * {@code request}; the emitted instance is now used consistently so the
 * partition-key enriched request is the one notified to the retry policy and sent.
 *
 * @param conflictLink        link of the conflict to read; must not be empty.
 * @param options             request options, may be null.
 * @param retryPolicyInstance retry policy to notify before send, may be null.
 * @return a Mono emitting the typed conflict response; synchronous failures are
 *         surfaced as an error Mono.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of conflicts under the given collection.
 *
 * @param collectionLink parent collection link; must not be empty.
 * @param state          per-operation query state.
 * @return a Flux of feed response pages of conflicts.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedLink);
}
/** Queries conflicts with a raw SQL string; delegates to the {@code SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
/** Queries conflicts under the given collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes the conflict resource identified by the given link.
 *
 * @param conflictLink link of the conflict to delete; must not be empty.
 * @param options      request options, may be null.
 * @return a Mono emitting the service response for the deleted conflict.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of deleteConflict: builds a Delete request, enriches it
 * with partition key information, then performs the delete.
 *
 * Fix: inside the flatMap the lambda previously ignored the request emitted by
 * {@code addPartitionKeyInformation} ({@code req}) and reused the captured local
 * {@code request}; the emitted instance is now used consistently so the
 * partition-key enriched request is the one notified to the retry policy and sent.
 *
 * @param conflictLink        link of the conflict to delete; must not be empty.
 * @param options             request options, may be null.
 * @param retryPolicyInstance retry policy to notify before send, may be null.
 * @return a Mono emitting the typed conflict response; synchronous failures are
 *         surfaced as an error Mono.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a user under the given database.
 *
 * @param databaseLink parent database link; must not be empty.
 * @param user         the user to create; must not be null.
 * @param options      request options, may be null.
 * @return a Mono emitting the service response for the created user.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Core implementation of createUser: request construction and validation are
// delegated to getUserRequest; any synchronous failure becomes an error Mono.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts (creates or replaces) a user under the given database.
 *
 * @param databaseLink parent database link; must not be empty.
 * @param user         the user to upsert; must not be null.
 * @param options      request options, may be null.
 * @return a Mono emitting the service response for the upserted user.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Core implementation of upsertUser: builds the Upsert request, notifies the
// retry policy before the wire call, and maps the raw response to a typed one.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds an {@link RxDocumentServiceRequest} targeting the users feed of the
 * given database for the specified operation.
 *
 * @param databaseLink  parent database link; must not be empty.
 * @param user          the user payload; must not be null and must validate.
 * @param options       request options, may be null.
 * @param operationType operation the request will perform (Create/Upsert/...).
 * @return the populated service request.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    // Validation order matters: link check first, then payload check.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String usersFeedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, usersFeedPath, user, headers, options);
}
/**
 * Replaces an existing user.
 *
 * @param user    the replacement user; must carry a valid self link.
 * @param options request options, may be null.
 * @return a Mono emitting the service response for the replaced user.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceUser: validates the payload, builds a Replace
// request against the user's self link, and maps the raw response to a typed one.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user identified by the given link.
 *
 * @param userLink link of the user to delete; must not be empty.
 * @param options  request options, may be null.
 * @return a Mono emitting the service response for the deleted user.
 */
// Fix: @Override was missing; every sibling wrapper in this family (readUser,
// replaceUser, ...) carries it, and this method implements the client contract.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of deleteUser: builds a Delete request for the user link,
// notifies the retry policy, and maps the raw response to a typed one.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user identified by the given link.
 *
 * @param userLink link of the user to read; must not be empty.
 * @param options  request options, may be null.
 * @return a Mono emitting the service response containing the user.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readUser: builds a Read request for the user link,
// notifies the retry policy, and maps the raw response to a typed one.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of users under the given database.
 *
 * @param databaseLink parent database link; must not be empty.
 * @param state        per-operation query state.
 * @return a Flux of feed response pages of users.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedLink);
}
/** Queries users with a raw SQL string; delegates to the {@code SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
/** Queries users under the given database with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads the client encryption key identified by the given link.
 *
 * @param clientEncryptionKeyLink link of the key to read; must not be empty.
 * @param options                 request options, may be null.
 * @return a Mono emitting the service response containing the key.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readClientEncryptionKey: builds a Read request for the
// key link, notifies the retry policy, and maps the raw response to a typed one.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a client encryption key under the given database.
 *
 * @param databaseLink        parent database link; must not be empty.
 * @param clientEncryptionKey the key to create; must not be null.
 * @param options             request options, may be null.
 * @return a Mono emitting the service response for the created key.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
// Core implementation of createClientEncryptionKey: request construction and
// validation are delegated to getClientEncryptionKeyRequest.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds an {@link RxDocumentServiceRequest} targeting the client encryption key
 * feed of the given database for the specified operation.
 *
 * @param databaseLink        parent database link; must not be empty.
 * @param clientEncryptionKey the key payload; must not be null and must validate.
 * @param options             request options, may be null.
 * @param operationType       operation the request will perform.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
    OperationType operationType) {
    // Validation order matters: link check first, then payload check.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String keysFeedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, keysFeedPath, clientEncryptionKey, headers, options);
}
/**
 * Replaces an existing client encryption key.
 *
 * @param clientEncryptionKey the replacement key; must not be null.
 * @param nameBasedLink       name-based link addressing the key to replace.
 * @param options             request options, may be null.
 * @return a Mono emitting the service response for the replaced key.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceClientEncryptionKey: validates the payload,
// builds a Replace request against the name-based link, and maps the raw
// response to a typed one.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
// NOTE: the replace path is derived from the caller-supplied name-based link,
// not from the resource's self link.
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of client encryption keys under the given database.
 *
 * @param databaseLink parent database link; must not be empty.
 * @param state        per-operation query state.
 * @return a Flux of feed response pages of keys.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedLink);
}
/** Queries client encryption keys under the given database with a parameterized query spec. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission under the given user.
 *
 * Fix: the retry wrapper previously received a second, freshly created retry
 * policy while the internal call used {@code documentClientRetryPolicy}; the two
 * instances were therefore uncoordinated (the policy notified via
 * onBeforeSendRequest was not the one driving retries). The same instance is now
 * used for both, matching every sibling wrapper in this class.
 *
 * @param userLink   parent user link; must not be empty.
 * @param permission the permission to create; must not be null.
 * @param options    request options, may be null.
 * @return a Mono emitting the service response for the created permission.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Core implementation of createPermission: request construction and validation
// are delegated to getPermissionRequest.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts (creates or replaces) a permission under the given user.
 *
 * @param userLink   parent user link; must not be empty.
 * @param permission the permission to upsert; must not be null.
 * @param options    request options, may be null.
 * @return a Mono emitting the service response for the upserted permission.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Core implementation of upsertPermission: builds the Upsert request, notifies
// the retry policy before the wire call, and maps the raw response to a typed one.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds an {@link RxDocumentServiceRequest} targeting the permissions feed of
 * the given user for the specified operation.
 *
 * @param userLink      parent user link; must not be empty.
 * @param permission    the permission payload; must not be null and must validate.
 * @param options       request options, may be null.
 * @param operationType operation the request will perform.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    // Validation order matters: link check first, then payload check.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, permissionsFeedPath, permission, headers, options);
}
/**
 * Replaces an existing permission.
 *
 * @param permission the replacement permission; must carry a valid self link.
 * @param options    request options, may be null.
 * @return a Mono emitting the service response for the replaced permission.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
// Core implementation of replacePermission: validates the payload, builds a
// Replace request against the permission's self link, and maps the raw response
// to a typed one.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the permission identified by the given link.
 *
 * @param permissionLink link of the permission to delete; must not be empty.
 * @param options        request options, may be null.
 * @return a Mono emitting the service response for the deleted permission.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of deletePermission: builds a Delete request for the
// permission link, notifies the retry policy, and maps the raw response to a
// typed one.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the permission identified by the given link.
 *
 * @param permissionLink link of the permission to read; must not be empty.
 * @param options        request options, may be null.
 * @return a Mono emitting the service response containing the permission.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readPermission: builds a Read request for the
// permission link, notifies the retry policy, and maps the raw response to a
// typed one.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of permissions under the given user.
 *
 * @param userLink parent user link; must not be empty.
 * @param state    per-operation query state.
 * @return a Flux of feed response pages of permissions.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, feedLink);
}
/** Queries permissions with a raw SQL string; delegates to the {@code SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, state);
}
/** Queries permissions under the given user with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an offer (throughput resource).
 *
 * @param offer the replacement offer; must carry a valid self link.
 * @return a Mono emitting the service response for the replaced offer.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceOffer: validates the payload and builds a
// Replace request against the offer's self link. Offers take no request options
// or custom headers, hence the null arguments.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the offer identified by the given link.
 *
 * @param offerLink link of the offer to read; must not be empty.
 * @return a Mono emitting the service response containing the offer.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Core implementation of readOffer: builds a Read request for the offer link and
// maps the raw response to a typed one. Offers take no request options; the
// HashMap cast disambiguates the headers-only create overload.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
// Notify the retry policy before the wire call.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of all offers (throughput resources) for the account.
 *
 * @param state per-operation query state.
 * @return a Flux of feed response pages of offers.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    String offersFeedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, offersFeedLink);
}
/**
 * Reads a non-document resource feed using the query options carried by the
 * per-operation state; delegates to the options-based overload.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    CosmosQueryRequestOptions stateOptions = state.getQueryOptions();
    return nonDocumentReadFeed(stateOptions, resourceType, klass, resourceLink);
}
/**
 * Reads a non-document resource feed, wrapping the paginated read in a
 * session-token-reset retry policy.
 *
 * @param options      query request options, may be null.
 * @param resourceType type of the non-document resource being read.
 * @param klass        item class used to deserialize feed pages.
 * @param resourceLink feed link to read from.
 * @return a Flux of feed response pages.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final DocumentClientRetryPolicy feedRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, feedRetryPolicy),
        feedRetryPolicy);
}
// Core implementation of the non-document feed read. Builds a per-page request
// factory (carrying the continuation token and page size) and an execution
// function, then drives them through the shared Paginator. Intentionally asserts
// that documents never go through this path — they have their own query engine.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
// Normalize null options so downstream lambdas can capture a non-null instance.
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 signals "service default page size".
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
assert(resourceType != ResourceType.Document);
// Factory invoked once per page: rebuilds headers with the current continuation
// token and notifies the retry policy for each constructed request.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Executes one page request and converts the raw response into a typed feed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Delegate to the SqlQuerySpec overload with an unparameterized query.
    return queryOffers(new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Reads go through the session-token-reset retry policy like other metadata reads.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
        documentClientRetryPolicy);
}

// Issues a Read request against the account root ("" path) and maps the payload
// into a DatabaseAccount. Synchronous failures become an error Mono.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Returns the session container tracking per-partition session tokens.
public Object getSession() {
    return this.sessionContainer;
}

// Replaces the session container. The argument must be a SessionContainer;
// any other type fails here with a ClassCastException.
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}

@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}

@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}

@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}

@Override
public AddressSelector getAddressSelector() {
    // A fresh selector is built per call from the shared resolver and configured protocol.
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the DatabaseAccount metadata from one specific endpoint (bypassing endpoint
// selection) via the gateway. Side effect: on success, refreshes the cached
// useMultipleWriteLocations flag from the account's multi-write capability combined
// with the client's connection policy.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Force the request to the caller-provided endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Selects the store model (gateway vs. direct/TCP) for a request. Certain requests
 * must be routed through gateway even when the client connectivity mode is direct:
 * offers, client encryption keys, partition key ranges, most scripts, and the
 * metadata (database/collection/user/permission) create/delete/replace/read paths.
 *
 * @param request the service request to be routed; {@code request.useGatewayMode}
 *                forces gateway routing unconditionally.
 * @return the {@link RxStoreModel} the request should be dispatched through.
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Resource types that are always gateway-only (except script execution).
    if (resourceType == ResourceType.Offer ||
        resourceType == ResourceType.ClientEncryptionKey ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        // Metadata resource creation goes through gateway; data-plane writes go direct.
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Cross-partition queries/read-feeds without a pinned PK range or partition key
        // need the gateway's routing support; everything else goes direct.
        if ((operationType == OperationType.Query ||
            operationType == OperationType.SqlQuery ||
            operationType == OperationType.ReadFeed) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    // getAndSet makes close() idempotent: only the first caller performs shutdown.
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // The throughput control store exists only if throughput control was ever enabled.
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}

// Registers a throughput control group. The first call lazily builds the shared
// ThroughputControlStore (guarded by compareAndSet so it is wired exactly once)
// and attaches it to the direct store model or the gateway proxy depending on
// the connection mode; subsequent calls only add the group.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}

@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}

/**
 * Configure fault injector provider. Direct-mode clients wire the injector into both
 * the store model and the address resolver; the gateway proxy is always configured.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}

@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}

@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}

@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 *
 * @param partitionKey the logical partition key whose value is bound as @pkValue.
 * @param partitionKeySelector the property path selector appended after "c".
 * @return the query spec with the partition key value bound as a parameter.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    String pkParamName = "@pkValue";
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
    // InvalidPartitionException triggers a name-cache refresh and retry of the whole lookup.
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    // The request only carries routing metadata for the collection/PK-range caches;
    // no query is actually executed against the service from this method.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink, forceRefresh),
        invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection, then fetches every partition key range overlapping the
// full key space and converts each range into an EPK-based FeedRange.
private Mono<List<FeedRange>> getFeedRangesInternal(
    RxDocumentServiceRequest request,
    String collectionLink,
    boolean forceRefresh) {
    logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        // Ask for all ranges covering the entire partition key space.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(),
                RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
                forceRefresh,
                null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
// Converts the cached PK-range list into FeedRanges. A null list means the name
// cache is stale: the request is flagged for refresh and InvalidPartitionException
// is thrown so the surrounding retry policy re-resolves and retries.
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
    if (partitionKeyRangeList == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>();
    partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
    return feedRanges;
}

// Wraps one partition key range as an EPK-based feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    return randomUuid(rnd.nextLong(), rnd.nextLong());
}

// Stamps the RFC 4122 version/variant bits onto two raw random longs.
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble, then set version 4 (random UUID).
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the top two variant bits, then set the IETF variant (binary 10).
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: uses this client itself as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
// Wraps a document point operation with the threshold-based availability strategy
// (cross-region hedging): the primary attempt runs immediately across all regions,
// and one staggered, region-pinned hedge is scheduled per additional applicable
// region. The first NON-TRANSIENT result (success or terminal error) wins.
// Diagnostics from all attempts are merged back into the request options.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");
    final RequestOptions nonNullRequestOptions =
        initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
    checkArgument(
        resourceType == ResourceType.Document,
        "This method can only be used for document point operations.");
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        idempotentWriteRetriesEnabled,
        nonNullRequestOptions);
    // Fewer than two applicable regions: hedging is pointless, run the plain operation.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
            if (monoList.isEmpty()) {
                // First entry: the unrestricted attempt. Any CosmosException here is
                // treated as a candidate result (it already went through normal retries).
                Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedge attempt: pin it to 'region' by excluding all other applicable regions.
                clonedOptions.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        nonNullRequestOptions.getExcludeRegions(),
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                // Only NON-transient errors become results; transient ones propagate so
                // firstWithValue can ignore this branch in favor of another.
                Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger hedges: threshold + (hedgeIndex - 1) * thresholdStep.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    // firstWithValue emits the first mono that produces a value; losers are cancelled.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            diagnosticsFactory.merge(nonNullRequestOptions);
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);
            // NoSuchElementException from firstWithValue means every branch failed;
            // surface the first CosmosException found among the composed causes.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                        diagnosticsFactory.merge(nonNullRequestOptions);
                        return cosmosException;
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            diagnosticsFactory.merge(nonNullRequestOptions);
            return exception;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the (reactor-unwrapped) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    final Throwable unwrappedException = Exceptions.unwrap(t);
    return unwrappedException instanceof CosmosException;
}

// True when the throwable is a CosmosException whose status/sub-status combination
// is terminal for hedging (retrying in another region cannot change the outcome).
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrappedException = Exceptions.unwrap(t);
    if (!(unwrappedException instanceof CosmosException)) {
        return false;
    }
    CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
    return isNonTransientResultForHedging(
        cosmosException.getStatusCode(),
        cosmosException.getSubStatusCode());
}
// Builds the exclusion list that pins a hedged attempt to a single region:
// the caller's original exclusions plus every applicable region other than
// the region this attempt targets.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();
    for (String candidateRegion : applicableRegions) {
        if (!candidateRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(candidateRegion);
        }
    }
    return effectiveExcludedRegions;
}
// Decides whether a status/sub-status pair is terminal for hedging purposes, i.e.
// issuing the same request against another region could not produce a different outcome.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Anything below the bad-request threshold is a successful/terminal result.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // A client-enforced operation timeout will not improve by switching regions.
    boolean isClientOperationTimeout =
        statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
    // These codes describe the request itself, so the answer is region-independent.
    boolean isRequestLevelFailure =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
    // A not-found with no specific sub-status is an authoritative miss.
    boolean isPlainNotFound =
        statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
    return isClientOperationTimeout || isRequestLevelFailure || isPlainNotFound;
}
// Returns the override context when provided, otherwise this client itself.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    if (clientContextOverride != null) {
        return clientContextOverride;
    }
    return this;
}

/**
 * Returns the applicable endpoints ordered by preference list if any.
 * Read-only operations use the read endpoint list, write operations the write
 * endpoint list; other operation types yield an empty list.
 *
 * @param operationType the operation type being routed.
 * @param excludedRegions regions to filter out of the candidate endpoints.
 * @return the applicable endpoints ordered by preference list if any.
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    } else if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }
    return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes null entries from the given endpoint list in place.
 *
 * @param orderedEffectiveEndpointsList the list to scrub; may be null.
 * @return the same list instance with nulls removed, or the shared empty list
 *         when {@code null} was passed.
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // Single-pass removal; the previous index-based remove(i) loop was O(n^2)
    // on ArrayList and harder to read. Mutation-in-place semantics are unchanged.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: extracts the excluded regions from the request options.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {
    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
// Computes the ordered region list eligible for hedged (speculative) execution.
// Returns an empty list whenever hedging does not apply: disabled/missing policy,
// non-document resource, non-idempotent writes, single-write-region accounts, or
// a non-threshold-based availability strategy.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {
    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return EMPTY_REGION_LIST;
    }
    if (resourceType != ResourceType.Document) {
        return EMPTY_REGION_LIST;
    }
    if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
        return EMPTY_REGION_LIST;
    }
    if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
        return EMPTY_REGION_LIST;
    }
    if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
        return EMPTY_REGION_LIST;
    }
    List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
    // Exclusion comparison is case-insensitive via lower-casing on both sides.
    HashSet<String> normalizedExcludedRegions = new HashSet<>();
    if (excludedRegions != null) {
        excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
    }
    List<String> orderedRegionsForSpeculation = new ArrayList<>();
    endpoints.forEach(uri -> {
        // NOTE(review): assumes getRegionName never returns null for an applicable
        // endpoint; a null here would NPE on toLowerCase - confirm with the endpoint manager.
        String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
        if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
            orderedRegionsForSpeculation.add(regionName);
        }
    });
    return orderedRegionsForSpeculation;
}
// Feed-operation counterpart of wrapPointOperationWithAvailabilityStrategy: runs the
// feed operation immediately, then schedules staggered region-pinned hedges; the first
// non-transient result wins. Unlike the point-operation path, no ScopedDiagnosticsFactory
// is involved here - diagnostics stay on the cloned requests.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
    final ResourceType resourceType,
    final OperationType operationType,
    final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
    final RxDocumentServiceRequest req,
    final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
    checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
    checkNotNull(req, "Argument 'req' must not be null.");
    assert(resourceType == ResourceType.Document);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        this.getEffectiveEndToEndOperationLatencyPolicyConfig(
            req.requestContext.getEndToEndOperationLatencyPolicyConfig());
    List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
    // Feed operations never enable idempotent-write retries (reads only), hence 'false'.
    List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        false,
        initialExcludedRegions
    );
    // Fewer than two applicable regions: no hedging, run the plain operation.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return feedOperation.apply(retryPolicyFactory, req);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            // Each attempt operates on its own clone so headers/context don't interleave.
            RxDocumentServiceRequest clonedRequest = req.clone();
            if (monoList.isEmpty()) {
                // First entry: unrestricted attempt; any CosmosException is a candidate result.
                Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedge attempt pinned to 'region' by excluding the other applicable regions.
                clonedRequest.requestContext.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        initialExcludedRegions,
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                // Only non-transient errors terminate this branch with a result.
                Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger hedges: threshold + (hedgeIndex - 1) * thresholdStep.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    // The first mono that yields a value wins; the rest are cancelled.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);
            // NoSuchElementException from firstWithValue means every branch failed;
            // surface the first CosmosException among the composed causes.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        return Utils.as(innerException, CosmosException.class);
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            return exception;
        });
}
// Callback shape for a single document point operation, parameterized by the
// (possibly cloned) request options, the effective end-to-end latency policy,
// and the diagnostics factory the attempt should create its diagnostics through.
@FunctionalInterface
private interface DocumentPointOperation {
    Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Either-style holder for a hedged point operation outcome: exactly one of
// 'response' or 'exception' is non-null. Used so that non-transient errors can
// flow through Mono.firstWithValue as values instead of error signals.
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.exception = null;
        this.response = response;
    }

    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}

// Generic counterpart of NonTransientPointOperationResult for feed operations.
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.exception = null;
        this.response = response;
    }

    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
// Diagnostics factory that scopes all CosmosDiagnostics created during one logical
// operation (e.g. a hedged request fanned out across regions) so they can later be
// merged into a single CosmosDiagnosticsContext exactly once.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Guards merge(): only the first merge folds diagnostics into a context.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    // Every diagnostics instance created through this factory, in creation order.
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;
    private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        mostRecentlyCreatedDiagnostics.set(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    @Override
    public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
        return this.mostRecentlyCreatedDiagnostics.get();
    }

    // Merges collected diagnostics into the context snapshot carried by the request
    // options when present; otherwise falls back to context discovery in merge(ctx).
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            // Fix: the snapshot was previously fetched twice (null-check on one call,
            // assignment from a second); a single call is sufficient and avoids any
            // inconsistency if the snapshot changes between the two reads.
            knownCtx = requestOptions.getDiagnosticsContextSnapshot();
        }
        merge(knownCtx);
    }

    public void merge(CosmosDiagnosticsContext knownCtx) {
        // First merge wins; later calls (e.g. from doOnCancel after success) are no-ops.
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // Fall back to the first created diagnostics that already carries a context.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        // Attach every non-empty, not-yet-contextualized diagnostics to the chosen context.
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so the paged-flux layer keeps them.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    // Clears collected diagnostics and re-arms the factory for another merge cycle.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---- shared constants and bridge accessors (static) ----
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
// Accessors into package-private state of public model types (avoids widening their API).
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
// Process-wide machine id placeholder and client bookkeeping shared by all client instances.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// endpoint string -> number of clients created against it (diagnostics only).
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- immutable per-client configuration (set once in the constructor) ----
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// ---- authentication state (exactly one mechanism is active; see constructor) ----
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// ---- mutable runtime state (populated during init()) ----
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// resourceId/fullName -> (partition key, resource token) pairs; only for permission-feed auth.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor variant without a {@link TokenCredential}: delegates to the
 * overload below passing {@code null} for the token credential, then installs the
 * supplied {@link CosmosAuthorizationTokenResolver} (may be null).
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor variant accepting both an {@link AzureKeyCredential} and a
 * {@link TokenCredential}; delegates to the private permission-feed constructor and
 * then installs the supplied {@link CosmosAuthorizationTokenResolver} (may be null).
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Delegating constructor that additionally builds the resource-token map from a
 * permission feed (resource-token based authentication).
 *
 * For each permission, the resource link is parsed and a (partition key, token)
 * pair is registered under the resource id / full name. The first resource token
 * in the feed is remembered as the fallback token.
 *
 * @throws IllegalArgumentException if a permission's resource link is empty or
 *         unparsable, or if the feed yields no usable tokens.
 *
 * Idiom cleanup vs. previous version: {@code !isEmpty()} instead of
 * {@code size() > 0}, {@code Map.computeIfAbsent} instead of the manual
 * get/null-check/put sequence, and the dead {@code = null} initialization removed.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);

    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the previous get/null-check/put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: wires up configuration, authentication, diagnostics config,
 * session container and the gateway/global-endpoint plumbing. Heavy initialization
 * (caches, store model) happens later in init(); on any runtime failure here the
 * client is closed before the exception is rethrown.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Per-process client bookkeeping (diagnostics only).
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
// Correlation id defaults to the zero-padded client id when none is supplied.
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
// NOTE(review): 5 arguments but only 4 '{}' placeholders — configs.getProtocol()
// is silently dropped by SLF4J; consider adding a placeholder for it.
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Authentication precedence: key credential > resource token > master key > AAD token.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): this string literal appears truncated by extraction
// (the AAD scope built from the endpoint scheme); verify against VCS.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
// Default to direct-mode connectivity when no policy is supplied.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session capturing is active for SESSION consistency or when explicitly overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Release partially acquired resources before propagating.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/** Returns the diagnostics client configuration assembled in the constructor. */
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
/**
 * Creates a new {@link CosmosDiagnostics} for this client, applying the sampling
 * rate from the telemetry config, and records it as the most recently created one.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    CosmosDiagnostics freshDiagnostics = diagnosticsAccessor.create(
        this,
        telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    this.mostRecentlyCreatedDiagnostics.set(freshDiagnostics);
    return freshDiagnostics;
}
/**
 * Creates the gateway service-configuration reader and fails fast with a clear
 * message when the database account could not be fetched (unreachable endpoint or
 * invalid auth token). Also derives whether multi-region writes are usable.
 * NOTE(review): several string literals below appear truncated at 'https:' by
 * extraction — verify the full messages against VCS.
 */
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
// Surface the root cause of the failed account refresh when one was recorded.
Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
if (databaseRefreshErrorSnapshot != null) {
logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot
);
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot);
} else {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
}
}
// Multi-write requires both the client policy and the account capability.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the initialized caches and service-configuration reader into the gateway
 * store model once they are available.
 */
private void updateGatewayProxy() {
    RxGatewayStoreModel proxy = this.gatewayProxy;
    proxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    proxy.setCollectionCache(this.collectionCache);
    proxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    proxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/** Serializes this client's collection cache into the given metadata snapshot. */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up direct (TCP) connectivity: builds the global address resolver and the
 * store client factory, then creates the server store model.
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
/**
 * Adapts this client to the {@link DatabaseAccountManagerInternal} interface used
 * by the global endpoint manager; all calls delegate back to this instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model; overridable in tests (package-private).
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection
 * sharing across clients is enabled, a process-wide shared instance is returned
 * instead of a dedicated one.
 */
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
.withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
.withProxy(this.connectionPolicy.getProxy())
.withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
} else {
diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
return HttpClient.createFixed(httpClientConfig);
}
}
/**
 * Creates the server (direct-mode) store model backed by a new store client.
 * NOTE(review): the {@code subscribeRntbdStatus} parameter is unused in this body;
 * confirm whether it can be removed or should be wired through.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the account service endpoint this client was created with. */
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/** Returns the effective connection policy (never null after construction). */
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/** Whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
/** Returns the consistency level configured for this client. */
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
/** Returns the client telemetry instance (may be set during init). */
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
/** Returns the correlation id (user-supplied, or zero-padded client id). */
@Override
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
/**
 * Returns the machine id derived from the diagnostics client config, or
 * {@code null} when no diagnostics config is available.
 */
@Override
public String getMachineId() {
    return this.diagnosticsClientConfig == null
        ? null
        : ClientTelemetry.getMachineId(this.diagnosticsClientConfig);
}
/** Returns the full user-agent string (including any configured suffix). */
@Override
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
/** Returns the last CosmosDiagnostics created via createDiagnostics(), or null. */
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return mostRecentlyCreatedDiagnostics.get();
}
/**
 * Creates a database; wraps the internal call with a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Create-Database request; serialization timing is recorded
 * into the request's diagnostics. Argument errors surface as Mono.error.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Measure JSON serialization cost for diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database; wraps the internal call with a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete-Database request; argument errors surface as
 * Mono.error.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database; wraps the internal call with a per-request retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read-Database request; argument errors surface as
 * Mono.error.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of all databases in the account. */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a child resource type to the feed link used for
 * queries (e.g. a collection link + Document -> ".../docs").
 *
 * @param parentResourceLink link of the parent resource; ignored for root-scoped
 *                           types (Database, Offer)
 * @param resourceTypeEnum   the child resource type being queried
 * @return the query/feed link for the given type
 * @throws IllegalArgumentException for resource types that have no feed link
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:
            return Paths.OFFERS_ROOT;
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case ClientEncryptionKey:
            return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            // Include the offending type so the failure is diagnosable from the message.
            throw new IllegalArgumentException("resource type not supported: " + resourceTypeEnum);
    }
}
/** Null-safe extraction of the operation-context/listener tuple from query options. */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/** Null-safe extraction of the operation-context/listener tuple from request options. */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload using this client as the diagnostics factory.
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Core query entry point: resolves the feed link, picks/creates a correlation
 * activity id, wires the invalid-partition retry policy and a scoped diagnostics
 * factory, and merges collected diagnostics into the operation state on every
 * completion path (next signal, error, and cancellation).
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Reuse the caller-provided correlation id when present, else create a fresh one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
// Let the operation state reset/merge the scoped diagnostics across retries.
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Builds the pipelined query execution context and executes it. Query-plan
 * diagnostics are attached only to the first page; when an end-to-end latency
 * policy is enabled, the flux is wrapped with the corresponding timeout handling.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Attach query-plan diagnostics to the first page only.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsClientContext);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Attaches diagnostics to a query exception (typically a timeout/cancellation).
 * Prefers the most recently created diagnostics instance; otherwise merges the
 * client-side request statistics of all requests that were cancelled on timeout
 * into a single diagnostics object and attaches that.
 *
 * @param requestOptions           the query request options tracking cancelled-request diagnostics
 * @param exception                the exception to decorate with diagnostics
 * @param diagnosticsClientContext the per-operation diagnostics context
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        // A diagnostics instance already exists for this operation - use it as-is.
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        // Fall back to merging diagnostics captured for requests cancelled by the timeout.
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);
        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregatedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);

                        // BUGFIX: this previously read the statistics of 'first' a second
                        // time, so 'toBeMerged' statistics were never actually merged.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);

                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get();
            BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
        }
    }
}
/**
 * Wraps a query's feed-response flux with the end-to-end operation timeout.
 * On timeout, marks the query as cancelled, attaches merged diagnostics to the
 * resulting exception, and surfaces either a negative-timeout exception (when the
 * configured timeout is negative) or an {@link OperationCancelledException}.
 *
 * <p>Refactored: the previous implementation duplicated the whole
 * timeout/onErrorMap pipeline for the negative-timeout case; the two branches
 * differed only in the exception constructed.</p>
 *
 * @param feedResponseFlux          the underlying feed responses
 * @param endToEndPolicyConfig      the enabled end-to-end latency policy
 * @param requestOptions            options used to locate cancelled-request diagnostics
 * @param isQueryCancelledOnTimeout flag flipped when the timeout fires
 * @param diagnosticsClientContext  per-operation diagnostics context
 * @return the flux guarded by the end-to-end timeout
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout,
    DiagnosticsClientContext diagnosticsClientContext) {

    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    final boolean isNegativeTimeout = endToEndTimeout.isNegative();

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (!(throwable instanceof TimeoutException)) {
                return throwable;
            }
            CosmosException cancellationException = isNegativeTimeout
                ? getNegativeTimeoutException(null, endToEndTimeout)
                : new OperationCancelledException();
            cancellationException.setStackTrace(throwable.getStackTrace());
            isQueryCancelledOnTimeout.set(true);
            applyExceptionToMergedDiagnosticsForQuery(
                requestOptions, cancellationException, diagnosticsClientContext);
            return cancellationException;
        });
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, state);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Databases are queried against the root databases feed.
    String databasesFeedLink = Paths.DATABASES_ROOT;
    return createQuery(databasesFeedLink, querySpec, state, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // Obtain a per-request retry policy and run the internal call under it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that creates a collection under the given database.
 * Serialization time is captured into the request diagnostics, and the session token from the
 * response is recorded in the session container.
 *
 * @param databaseLink        link of the parent database; must be non-empty
 * @param collection          collection definition to create; must be non-null (id is validated)
 * @param options             optional request options (may be null)
 * @param retryPolicyInstance retry policy obtained by the public wrapper; may be null
 * @return a Mono emitting the created collection, or an error Mono on validation/serialization failure
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validation failures are converted to an error Mono by the catch block below.
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);

        // Time the payload serialization so it can be reported in the request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        // Let the retry policy observe the request before it is dispatched.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        // Record the response's session token so later session-consistent reads observe this write.
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // Obtain a per-request retry policy and run the internal call under it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that replaces an existing collection (addressed via its
 * self link). Serialization time is captured into the request diagnostics, and the session token
 * from the response is recorded when a resource body is returned.
 *
 * @param collection          replacement collection definition; must be non-null (id is validated)
 * @param options             optional request options (may be null)
 * @param retryPolicyInstance retry policy obtained by the public wrapper; may be null
 * @return a Mono emitting the replaced collection, or an error Mono on validation/serialization failure
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);

        // Time the payload serialization so it can be reported in the request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        // Let the retry policy observe the request before it is dispatched.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        // Unlike create, replace may return no resource body, hence the null guard before
        // recording the session token.
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // Obtain a per-request retry policy and run the internal call under it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that deletes the collection addressed by the given link.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);

        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

        // Let the retry policy observe the request before it is dispatched.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this
            .delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Populates headers for a DELETE and dispatches it through the store model. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // Update the retry context's end time when this attempt is a retry.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel storeModel = getStoreProxy(populatedRequest);
            return storeModel.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/** Populates headers for the delete-by-partition-key POST and dispatches it through the store model. */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            // Update the retry context's end time when this attempt is a retry.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/** Populates headers for a GET and dispatches it through the store model. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // Update the retry context's end time when this attempt is a retry.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel storeModel = getStoreProxy(populatedRequest);
            return storeModel.processMessage(populatedRequest);
        });
}
/** Populates headers for a feed-read GET and dispatches it through the store model. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    Mono<RxDocumentServiceRequest> requestWithHeaders = populateHeadersAsync(request, RequestVerb.GET);
    return requestWithHeaders.flatMap(populated -> getStoreProxy(populated).processMessage(populated));
}
/** Populates headers for a query POST, dispatches it, and captures the response's session token. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeModel = this.getStoreProxy(populatedRequest);
            return storeModel
                .processMessage(populatedRequest)
                .map(response -> {
                    // Capture the session token before emitting the response downstream.
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // Obtain a per-request retry policy and run the internal call under it.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that reads the collection addressed by the given link.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);

        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);

        // Let the retry policy observe the request before it is dispatched.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this
            .read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    // The database link is required to address the collections feed beneath it.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    String collectionsFeedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsFeedLink);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text before delegating to the shared query pipeline.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Collections under the given database are queried through the shared query pipeline.
    return createQuery(
        databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string, e.g. {@code [p0,p1,...]}.
 * JsonSerializable values use their own serializer; everything else goes through the shared
 * object mapper.
 *
 * @throws IllegalArgumentException when a parameter cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder joined = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            joined.append(",");
        }
        Object param = objectArray.get(i);
        if (param instanceof JsonSerializable) {
            joined.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param));
        } else {
            try {
                joined.append(mapper.writeValueAsString(param));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return joined.append("]").toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the request headers from client-level defaults and the per-request options.
 * Custom headers from the options are applied first, so the well-known options handled
 * below take precedence over them on key conflicts.
 *
 * @param options       per-request options; may be null
 * @param resourceType  target resource type (affects the Prefer: return-minimal header)
 * @param operationType target operation (ditto)
 * @return a mutable map of header name to value
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();

    // Client-level defaults applied to every request.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }

    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }

    if (options == null) {
        // No per-request options: only the client-wide contentResponseOnWrite setting applies.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }

    // Caller-supplied custom headers (may be overridden by the well-known options below).
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }

    // Per-request override of the client-wide contentResponseOnWrite setting.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }

    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }

    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }

    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }

    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }

    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }

    // Pre/post trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }

    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }

    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }

    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }

    // Explicit manual throughput (or legacy offer type) takes precedence.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }

    // ThroughputProperties are only honored when no explicit offer throughput was given.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }

            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }

    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }

    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }

    // Dedicated gateway (integrated cache) directives.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }

    return headers;
}
/**
 * Returns the retry-policy factory used by the CRUD/query entry points of this class to obtain
 * a per-request {@link DocumentClientRetryPolicy}.
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection and stamps the partition key for the given document onto the
 * request (see the synchronous overload for the actual stamping logic).
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Document document,
    RequestOptions options) {

    // The collection's partition key definition drives the header value, so resolve it first.
    Mono<Utils.ValueHolder<DocumentCollection>> resolvedCollectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return resolvedCollectionObs.map(resolvedCollection -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
        return request;
    });
}
/**
 * Stamps the partition key for the given document onto the request once the supplied
 * collection resolution completes (see the synchronous overload for the stamping logic).
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object document,
    RequestOptions options,
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {

    return collectionObs.map(resolvedCollection -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
        return request;
    });
}
/**
 * Computes the effective partition key for the operation and stamps it on the request, both as
 * typed request state and as the wire header. Precedence: explicit PartitionKey.NONE from the
 * options, then any explicit partition key from the options, then "empty" for non-partitioned
 * collections, then extraction from the document payload.
 *
 * @param request             the request to stamp
 * @param contentAsByteBuffer serialized document payload; may be null when objectDoc is given
 * @param objectDoc           the document object; may be null when contentAsByteBuffer is given
 * @param options             per-request options possibly carrying an explicit partition key
 * @param collection          the resolved target collection (provides the partition key definition)
 * @throws UnsupportedOperationException when no partition key can be determined
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object objectDoc, RequestOptions options,
    DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Caller explicitly targeted the "none" partition key.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        // Caller supplied an explicit partition key value.
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection has no partition key definition.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the partition key value from the document payload itself.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // The buffer may already have been read during serialization; rewind before parsing.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }

        // Time the extraction so it is visible in the serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }

    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the document write request (create/upsert/etc. per {@code operationType}): serializes
 * the payload (recording serialization time in the diagnostics), creates the service request,
 * wires up the end-to-end-timeout cancellation hook and excluded regions, lets the retry policy
 * observe the request, and finally resolves the collection to stamp the partition key.
 *
 * @param requestRetryPolicy           retry policy to notify before send; may be null
 * @param documentCollectionLink       link of the target collection; must be non-empty
 * @param document                     the document payload; must be non-null
 * @param options                      per-request options; may be null
 * @param disableAutomaticIdGeneration NOTE(review): not referenced in this method body - id
 *                                     generation appears to be handled during serialization;
 *                                     confirm against callers
 * @param operationType                the write operation being performed
 * @param clientContextOverride        diagnostics client context override
 * @return a Mono emitting the fully prepared request (partition key stamped)
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    OperationType operationType,
    DiagnosticsClientContext clientContextOverride) {

    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    // Time the payload serialization so it can be reported in the request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);

    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }

    if( options != null) {
        // Allow the end-to-end timeout policy to flag this request as cancelled-on-timeout,
        // and propagate the caller's excluded regions.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Let the retry policy observe the request before it is dispatched.
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Resolve the target collection, then stamp the partition key onto the request.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: serializes the batch body (recording
 * serialization time in the diagnostics), creates the request, wires up the end-to-end-timeout
 * cancellation hook and excluded regions, lets the retry policy observe the request, resolves
 * the target collection, and stamps the batch headers.
 *
 * <p>Cleanup: the exclude-regions list was previously copied into the request context twice
 * under the identical {@code options != null} condition; the redundant second copy was removed.</p>
 *
 * @param requestRetryPolicy           retry policy to notify before send; may be null
 * @param documentCollectionLink       link of the target collection; must be non-empty
 * @param serverBatchRequest           the prepared batch body; must be non-null
 * @param options                      per-request options; may be null
 * @param disableAutomaticIdGeneration NOTE(review): not referenced in this method body - confirm
 *                                     against callers
 * @return a Mono emitting the fully prepared batch request
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // Time the payload serialization so it can be reported in the request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null) {
        // Allow the end-to-end timeout policy to flag this request as cancelled-on-timeout,
        // and propagate the caller's excluded regions.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Let the retry policy observe the request before it is dispatched.
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Resolve the target collection, then stamp the batch-specific headers.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Stamps batch-specific routing and behavior headers on the request. Single-partition batches
 * are routed by partition key (honoring PartitionKey.NONE); partition-key-range batches are
 * routed by range identity.
 *
 * @param request            the batch request to stamp
 * @param serverBatchRequest the batch; must be a single-partition-key or partition-key-range batch
 * @param collection         the resolved target collection (provides the partition key definition)
 * @return the same request instance, for chaining
 * @throws UnsupportedOperationException for unrecognized ServerBatchRequest subtypes
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
    ServerBatchRequest serverBatchRequest,
    DocumentCollection collection) {

    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        // Route by partition key; PartitionKey.NONE maps to the collection's "none" value.
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;

        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // Route directly to the batch's partition key range.
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    // Batch behavior flags: atomicity and continue-on-error semantics.
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());

    return request;
}
/**
 * Populates the transport-level headers on the request: the x-date timestamp, the (URL-encoded)
 * authorization token when key/token/resolver/credential auth is configured, the API type,
 * SDK capabilities, default content-type/accept headers, and - for feed-range-scoped document
 * or conflict feeds/queries - the feed-range filtering headers.
 *
 * <p>NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to
 * populate headers.</p>
 *
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());

    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();

        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is URL-encoded before being placed on the wire.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }

    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }

    this.populateCapabilitiesHeader(request);

    // Default content-type for bodied verbs; PATCH uses the JSON-patch media type.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }

    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }

    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }

    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

    // Feed-range-scoped feeds/queries need collection resolution before the auth header
    // can be finalized.
    if (this.requiresFeedRangeFiltering(request)) {
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }

    return this.populateAuthorizationHeader(request);
}
/**
 * Advertises the SDK's supported capabilities to the service, unless the
 * request already carries an explicit capabilities header.
 */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        return;
    }
    headers.put(
        HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
        HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
}
/**
 * Decides whether the request must be narrowed to a feed range: only
 * Document/Conflict feed or query operations that actually carry a feed
 * range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    boolean isDocumentOrConflict =
        request.getResourceType() == ResourceType.Document
            || request.getResourceType() == ResourceType.Conflict;
    if (!isDocumentOrConflict) {
        return false;
    }

    OperationType operationType = request.getOperationType();
    boolean isFeedOrQueryOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;

    return isFeedOrQueryOperation && request.getFeedRange() != null;
}
/**
 * Populates the AUTHORIZATION header on the request when AAD authentication
 * is in use; other auth schemes attach their token earlier in the pipeline,
 * so the request is passed through untouched.
 *
 * @throws IllegalArgumentException if {@code request} is null.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Nothing to add for non-AAD token types.
        return Mono.just(request);
    }

    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Populates the AUTHORIZATION header on a raw header collection when AAD
 * authentication is in use; for other auth schemes the headers are returned
 * unchanged.
 *
 * @throws IllegalArgumentException if {@code httpHeaders} is null.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }

    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * Returns the authorization scheme (e.g. primary master key, resource token,
 * AAD token) this client was configured with.
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Resolves the authorization token for a request, honoring the configured
 * credential sources in strict precedence order:
 * custom token resolver, then key credential, then a single resource token,
 * then the per-resource token map.
 *
 * NOTE(review): the final branch relies on {@code assert resourceTokensMap != null},
 * which is a no-op unless assertions are enabled — presumably the constructor
 * guarantees at least one credential source; verify against the ctor.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
        // Delegate entirely to the user-supplied resolver; properties are exposed read-only.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        // Key credential: compute an HMAC signature over verb/resource/headers.
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token was supplied directly; use it verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if (resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level reads are authorized with the first token from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }

        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal {@link ResourceType} onto the public
 * {@link CosmosResourceType}, defaulting to {@code SYSTEM} when the
 * serialized form has no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from a service response so subsequent
// session-consistent requests can replay it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Sends a POST (create) request through the store proxy after populating the
 * standard request headers.
 *
 * @param request the service request to dispatch.
 * @param documentClientRetryPolicy used only to close out per-attempt timing
 *        on retries; the actual retry loop is driven by the caller.
 * @param operationContextAndListenerTuple optional operation context/listener
 *        propagated to the transport.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

            // On a retry, stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Sends an upsert (POST with IS_UPSERT header) through the store proxy and
 * captures the resulting session token.
 *
 * @param request the service request to dispatch.
 * @param documentClientRetryPolicy used only to close out per-attempt timing on retries.
 * @param operationContextAndListenerTuple optional operation context/listener
 *        propagated to the transport.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            Map<String, String> headers = requestPopulated.getHeaders();
            // populateHeadersAsync is expected to always yield headers.
            assert (headers != null);
            // The upsert flag is what distinguishes this POST from a plain create.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            // On a retry, stamp the end time of the previous attempt for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                .map(response -> {
                    // Session token must be captured so follow-up reads can observe this write.
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            );
        });
}
/**
 * Sends a PUT (replace) request through the store proxy after populating the
 * standard request headers. The retry policy is consulted only to close out
 * per-attempt timing on retries.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                // Stamp the end time of the previous attempt for diagnostics.
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Sends a PATCH request through the store proxy after populating the standard
 * request headers. The retry policy is consulted only to close out
 * per-attempt timing on retries.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                // Stamp the end time of the previous attempt for diagnostics.
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document, wrapping the core operation with the configured
 * cross-region availability strategy. Non-idempotent write retries are only
 * honored when explicitly enabled on the request options.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core create-document path: sets up a scoped diagnostics factory, the
 * session-reset retry policy (augmented with partition-key-mismatch handling
 * when no partition key was supplied), and enforces the end-to-end operation
 * timeout around the retried operation.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Scoped factory collects diagnostics across all retry attempts of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();

    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without an explicit PK the server may report a mismatch after a split;
        // this wrapper refreshes the collection cache and retries.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and dispatches a single create-document attempt. Synchronous
 * failures (e.g. serialization) are converted into an error Mono so the
 * caller's retry policy can observe them.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a point-operation Mono with the end-to-end operation timeout when an
 * enabled policy is configured. A negative timeout fails fast without
 * dispatching the operation; a timeout during execution is mapped to
 * {@code OperationCancelledException} with diagnostics attached.
 *
 * @param requestOptions options the effective policy config is recorded onto.
 * @param endToEndPolicyConfig effective end-to-end latency policy (may be null/disabled).
 * @param rxDocumentServiceResponseMono the operation to guard.
 * @param scopedDiagnosticsFactory source of the most recent diagnostics snapshot.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Ensure at least one diagnostics instance exists before failing fast.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }

        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            // Translate the reactor TimeoutException into a CosmosException carrying diagnostics.
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    return rxDocumentServiceResponseMono;
}
/**
 * Maps a reactor timeout into an {@code OperationCancelledException}: marks
 * the in-flight request as cancelled-on-timeout via the supplied callback
 * hook and attaches the latest diagnostics snapshot. Any other throwable is
 * returned unchanged.
 */
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

    // Reactor wraps exceptions thrown inside operators; unwrap before inspecting.
    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if (unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());

        // The hook (set by the request builder) flags the request context so the
        // transport can stop retrying a cancelled operation.
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }

        // Guarantee a diagnostics instance exists before attaching it.
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }
        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());

        return exception;
    }
    return throwable;
}
/**
 * Builds the {@code OperationCancelledException} used to fail fast when a
 * negative end-to-end timeout was configured, tagging it with the dedicated
 * sub-status code and, when available, the current diagnostics.
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    CosmosException exception = new OperationCancelledException(
        String.format("Negative timeout '%s' provided.", negativeTimeout),
        null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);

    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }

    return exception;
}
/**
 * Upserts a document, wrapping the core operation with the configured
 * cross-region availability strategy. Non-idempotent write retries are only
 * honored when explicitly enabled on the request options.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core upsert path: mirrors {@code createDocumentCore} — scoped diagnostics,
 * session-reset retry policy (plus partition-key-mismatch handling when no
 * partition key was supplied), and the end-to-end timeout wrapper.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Handles PK-range mismatches (e.g. after a partition split) by refreshing caches.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and dispatches a single upsert attempt. Synchronous failures are
 * converted into an error Mono so the caller's retry policy can observe them.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        // Upsert reuses the create-request builder; only the OperationType differs.
        Mono<RxDocumentServiceRequest> reqObs =
            getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride);

        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its link, wrapping the core operation
 * with the configured cross-region availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core replace-by-link path: scoped diagnostics, session-reset retry policy
 * (plus partition-key-mismatch handling when no partition key was supplied —
 * the collection link is derived from the document link), and the end-to-end
 * timeout wrapper.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Validates inputs for a replace-by-link, converts the user object into the
 * internal {@link Document} representation, and delegates to the typed
 * replace path. Synchronous failures are converted into an error Mono so the
 * caller's retry policy can observe them.
 *
 * @throws IllegalArgumentException (via the error Mono) if {@code documentLink}
 *         is empty or {@code document} is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // Fix: pass the throwable itself so the stack trace is preserved in the log,
        // matching the pattern used by createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self-link, wrapping the core operation
 * with the configured cross-region availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core replace path for a {@link Document} instance (addressed via its
 * self-link).
 *
 * NOTE(review): unlike the link-based {@code replaceDocumentCore}, this
 * overload does not create a ScopedDiagnosticsFactory nor apply
 * {@code getPointOperationResponseMonoWithE2ETimeout} — presumably
 * intentional for this legacy overload, but worth confirming against the
 * other point-operation paths.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // PK unknown: wrap with the mismatch retry policy keyed on the document's collection.
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }

    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the document and delegates to the typed replace path using the
 * document's self-link. Synchronous failures are converted into an error
 * Mono so the caller's retry policy can observe them.
 *
 * @throws IllegalArgumentException (via the error Mono) if {@code document} is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // Fix: the original message said "replacing a database" — this is the
        // document-replace path. Also pass the throwable to keep the stack trace.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds and dispatches a single replace attempt for a typed
 * {@link Document}: stamps the optional tracking id, serializes the payload
 * (recording serialization diagnostics), resolves the collection to attach
 * partition-key information, and issues the PUT.
 *
 * @throws IllegalArgumentException if {@code document} is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);

    Instant serializationStartTimeUTC = Instant.now();

    if (options != null) {
        // The tracking id is embedded in the payload so the service echoes it back,
        // enabling non-idempotent write retries to detect an already-applied replace.
        String trackingId = options.getTrackingId();

        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }

    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Allows the E2E-timeout machinery to flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);

    return requestObs
        // Fix: use the request emitted by the pipeline instead of closing over the
        // outer variable; the lambda parameter was previously ignored.
        .flatMap(req -> replace(req, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Resolves the effective end-to-end latency policy for a request, preferring
 * a request-level override and falling back to the client-level default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Returns the supplied policy config when present, otherwise the
 * client-level default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies a set of patch operations to a document, wrapping the core
 * operation with the configured cross-region availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core patch path: scoped diagnostics, session-reset retry policy, and the
 * end-to-end timeout wrapper around the retried patch attempt. Note that,
 * unlike create/upsert/replace, no PartitionKeyMismatchRetryPolicy wrapper
 * is added here.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and dispatches a single patch attempt: serializes the patch
 * operations (recording serialization diagnostics), resolves the collection
 * to attach partition-key information, and issues the PATCH.
 *
 * @throws IllegalArgumentException if {@code documentLink} is empty.
 * @throws NullPointerException if {@code cosmosPatchOperations} is null.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);

    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    // NOTE(review): sibling internals route through getEffectiveClientContext(...)
    // here; this path uses the override directly — confirm whether that is intentional.
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Allows the E2E-timeout machinery to flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // Document and content are null; the partition key is resolved from options.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);

    return requestObs
        // Fix: use the request emitted by the pipeline instead of closing over the
        // outer variable; the lambda parameter was previously ignored.
        .flatMap(req -> patch(req, retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document addressed by its link, wrapping the core operation with
 * the configured cross-region availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            null,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Deletes a document, supplying the item's node so the partition key can be
 * derived without an extra lookup; otherwise identical to the link-only
 * overload.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core delete path: scoped diagnostics, session-reset retry policy, and the
 * end-to-end timeout wrapper around the retried delete attempt.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                nonNullRequestOptions,
                requestRetryPolicy,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and dispatches a single delete attempt: resolves the collection to
 * attach partition-key information (optionally derived from the supplied
 * item node) and issues the DELETE. Synchronous failures are converted into
 * an error Mono so the caller's retry policy can observe them.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Allows the E2E-timeout machinery to flag this request as cancelled.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes all documents under a logical partition.
 *
 * NOTE(review): the {@code partitionKey} parameter is not forwarded —
 * presumably the partition key is carried inside {@code options} and
 * attached via addPartitionKeyInformation downstream; verify against callers.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and dispatches a single delete-by-partition-key attempt: the
 * request targets the PartitionKey resource type, and the partition key is
 * attached from the request options via addPartitionKeyInformation.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a document, using this client as the diagnostics factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}
/**
 * Reads a document through the availability strategy. Reads are idempotent,
 * so non-idempotent write retries are always disabled ({@code false}).
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Core read path: scoped diagnostics, session-reset retry policy, and the
 * end-to-end timeout wrapper around the retried read attempt.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(
                documentLink,
                nonNullRequestOptions,
                retryPolicyInstance,
                scopedDiagnosticsFactory),
            retryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and sends the service request for a document point read.
 *
 * <p>Resolves the target collection, attaches partition-key information to the
 * request, then performs the read and maps the service response to a
 * {@link ResourceResponse}.
 *
 * @param documentLink the document link to read; must be non-empty
 * @param options request options (non-null at this point in the pipeline)
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @param clientContextOverride diagnostics client context for this attempt
 * @return a {@link Mono} emitting the resource response, or a {@link Mono#error} on
 *         synchronous failures (e.g. an empty document link)
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Hook that lets the end-to-end timeout machinery flag this request as
        // cancelled-on-timeout after the fact.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        // Fix: use the request emitted by requestObs (carrying the resolved
        // partition-key information) instead of capturing the outer variable —
        // consistent with the other document operations in this class (e.g. the
        // delete-by-partition-key path), and robust should addPartitionKeyInformation
        // ever emit a different instance.
        return requestObs.flatMap(req ->
            this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection by issuing a full-scan query
 * ("SELECT * FROM r") against it.
 *
 * @param collectionLink the collection to read from; must be non-empty
 * @param state query/feed operation state (options, diagnostics)
 * @param classOfT target deserialization type
 * @return a {@link Flux} of feed-response pages
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
/**
 * Reads many documents identified by (id, partition key) pairs in a single logical
 * operation.
 *
 * <p>Strategy: resolve the collection and its routing map, bucket the requested item
 * identities by owning partition-key range, then serve single-item ranges via point
 * reads and multi-item ranges via per-range SQL queries. The per-page results are
 * merged into one aggregated {@link FeedResponse} with combined request charge,
 * query metrics and diagnostics.
 *
 * @param itemIdentityList the (id, partition key) pairs to read
 * @param collectionLink link of the target collection
 * @param state query/feed operation state (options, diagnostics context)
 * @param klass target deserialization type
 * @return a {@link Mono} emitting one aggregated feed response containing all found items
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
// Scoped factory accumulates diagnostics across the fan-out; merged back into the
// caller's context on completion/error.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Routing map is needed to find the owning range for each effective partition key.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
// Bucket item identities by owning partition-key range.
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
itemIdentityList
.forEach(itemIdentity -> {
// For hierarchical (multi-hash) partition keys all components must be supplied.
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Ranges with >1 item get a SQL query; single-item ranges are point-read below.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge both legs and aggregate charge, metrics and diagnostics into one response.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
// Record the whole readMany as one 200 operation in the diagnostics context.
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
// On CosmosException, record the failed operation with its status codes/diagnostics
// before surfacing the error.
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds, per partition-key range, the SQL query used for the multi-item leg of a
 * readMany operation. Ranges that map to a single item are omitted — those are served
 * via point reads instead.
 *
 * @param partitionRangeItemKeyMap item identities bucketed by owning partition-key range
 * @param partitionKeyDefinition the collection's partition-key definition
 * @return a map from range to the query spec covering that range's items (>1 item only)
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    final String pkSelector = createPkSelector(partitionKeyDefinition);
    // When the partition key path is "/id", a simple IN-list over c.id suffices.
    final boolean idIsPartitionKey = "[\"id\"]".equals(pkSelector);
    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges are handled as point reads
        }
        final SqlQuerySpec querySpec;
        if (idIsPartitionKey) {
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            querySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(entry.getKey(), querySpec);
    }
    return queriesByRange;
}
/**
 * Builds a parameterized "SELECT * FROM c WHERE c.id IN ( @param0, ... )" query for the
 * special case where the partition key path is "/id", so matching by id alone is
 * sufficient.
 *
 * @param idPartitionKeyPairList item identities to read (ids become IN-list parameters)
 * @param partitionKeySelector unused here; kept for signature symmetry with the
 *                             sibling query builders
 * @return the parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        String paramName = "@param" + i;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(i).getId()));
        parameterNames.add(paramName);
    }
    String queryText =
        "SELECT * FROM c WHERE c.id IN ( " + String.join(", ", parameterNames) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds a parameterized disjunction query matching each requested item by both id and
 * (single-path) partition key:
 * "SELECT * FROM c WHERE ( (c.id = @p1 AND c[pk] = @p0 ) OR ... )".
 * Per item i, @param(2i) carries the partition-key value and @param(2i+1) the id.
 *
 * @param itemIdentities item identities to read
 * @param partitionKeySelector selector suffix produced by {@link #createPkSelector}
 * @return the parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    List<String> disjuncts = new ArrayList<>();

    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(
            pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        // Note the double space after AND, preserved from the original text layout.
        disjuncts.add(
            "(c.id = " + idParamName + " AND  c" + partitionKeySelector + " = " + pkParamName + " )");
    }

    String queryText = "SELECT * FROM c WHERE ( " + String.join(" OR ", disjuncts) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds a parameterized disjunction query for collections with a hierarchical
 * (multi-hash) partition key: each item is matched by id AND every sub-partition-key
 * component, e.g. "(c.id = @pN AND c.tenant = @p0 AND c.user = @p1) OR ...".
 *
 * @param itemIdentities item identities to read
 * @param partitionKeyDefinition the collection's (multi-path) partition-key definition
 * @return the parameterized query spec
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
List<CosmosItemIdentity> itemIdentities,
PartitionKeyDefinition partitionKeyDefinition) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
// Parameter names are globally sequential across all items and components.
int paramCount = 0;
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkValueString = (String) pkValue;
// Each entry pairs a partition-key path with the parameter name bound to its value.
List<List<String>> partitionKeyParams = new ArrayList<>();
List<String> paths = partitionKeyDefinition.getPaths();
int pathCount = 0;
// NOTE(review): the composite partition-key value is assumed to be an
// '='-separated string with components in path order; values containing '='
// would break this split — confirm the upstream serialization guarantees this.
for (String subPartitionKey: pkValueString.split("=")) {
String pkParamName = "@param" + paramCount;
partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
parameters.add(new SqlParameter(pkParamName, subPartitionKey));
paramCount++;
pathCount++;
}
String idValue = itemIdentity.getId();
String idParamName = "@param" + paramCount;
paramCount++;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
for (List<String> pkParam: partitionKeyParams) {
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c.");
// substring(1) drops the leading '/' of the partition-key path.
queryStringBuilder.append(pkParam.get(0).substring(1));
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParam.get(1));
}
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds the bracketed partition-key selector appended to the document alias in
 * readMany queries, e.g. paths ["/a", "/b"] become "[\"a\"][\"b\"]" (used as
 * "c[\"a\"][\"b\"]").
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
// drop the leading '/' of each path
.map(pathPart -> StringUtils.substring(pathPart, 1))
// NOTE(review): replaces '"' with a lone backslash rather than an escaped quote —
// looks suspicious for path parts containing quotes; confirm this is intentional.
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
/**
 * Executes the multi-item (query) leg of a readMany operation: one SQL query per
 * partition-key range as prepared in {@code rangeQueryMap}.
 *
 * <p>Returns an empty flux when there is nothing to query. When an end-to-end latency
 * policy is enabled, the result flux is wrapped with timeout handling.
 *
 * @param diagnosticsFactory scoped diagnostics collector for this readMany
 * @param parentResourceLink query link of the parent collection
 * @param sqlQuery placeholder query spec (per-range specs come from rangeQueryMap)
 * @param options query request options
 * @param klass target deserialization type
 * @param resourceTypeEnum resource type being queried (Document)
 * @param collection the resolved target collection
 * @param rangeQueryMap per-range query specs; empty means no query leg
 * @return a flux of feed-response pages across all queried ranges
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
if (rangeQueryMap.isEmpty()) {
return Flux.empty();
}
UUID activityId = randomUuid();
// Shared flag so the timeout wrapper can mark the query as cancelled-on-timeout.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
diagnosticsFactory,
queryClient,
collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum,
isQueryCancelledOnTimeout);
Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
// Only wrap with the timeout machinery when an e2e latency policy is active.
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsFactory);
}
return feedResponseFlux;
}
/**
 * Executes the point-read leg of a readMany operation: every partition-key range that
 * owns exactly one requested item is served by a document point read rather than a
 * query.
 *
 * <p>404/0 (plain not-found) responses are converted into empty feed pages carrying the
 * failure's headers and diagnostics, so missing items do not fail the overall readMany.
 * All other errors propagate.
 *
 * @param diagnosticsFactory scoped diagnostics collector for this readMany
 * @param singleItemPartitionRequestMap item identities bucketed by owning range
 * @param resourceLink collection query link used to build each document link
 * @param queryRequestOptions original query options (converted to request options per read)
 * @param klass target deserialization type
 * @return a flux of single-item (or empty, on not-found) feed pages
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
// Only single-item buckets are point-read; larger buckets are handled by queryForReadMany.
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
// Plain 404 means "item absent" — tolerated, carried as the pair's right side.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// Not-found: empty page, but keep the failure's headers and request statistics.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
/**
 * Queries documents with a plain query string.
 *
 * @param collectionLink the collection to query
 * @param query the SQL query text (no parameters)
 * @param state query/feed operation state
 * @param classOfT target deserialization type
 * @return a {@link Flux} of feed-response pages
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    // Delegate to the SqlQuerySpec overload; a bare string carries no parameters.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Creates an {@link IDocumentQueryClient} facade over this client for the query
 * pipeline, optionally instrumented with an operation listener (bulk/transactional
 * context) that observes requests, responses and failures.
 *
 * @param rxDocumentClientImpl the backing client (note: the anonymous class actually
 *                             references the enclosing {@code RxDocumentClientImpl.this})
 * @param operationContextAndListenerTuple optional operation context + listener; null
 *                                         for plain queries
 * @return the query-client facade
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// Without a listener, execute directly; with one, tag the request with the
// correlated activity id and notify the listener on request/response/error.
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
// Not supported by this facade; the query pipeline does not use read-feed here.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents with a parameterized query spec.
 *
 * <p>Logs the query (subject to the query logger's settings) and dispatches into the
 * generic query pipeline for the Document resource type.
 *
 * @param collectionLink the collection to query
 * @param querySpec the parameterized SQL query
 * @param state query/feed operation state
 * @param classOfT target deserialization type
 * @return a {@link Flux} of feed-response pages
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state,
Class<T> classOfT) {
SqlQuerySpecLogger.getInstance().logQuery(querySpec);
return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Queries the change feed of a collection.
 *
 * @param collection the resolved target collection (must not be null); both its
 *                   alt-link and resource id are needed by the change-feed machinery
 * @param changeFeedOptions change-feed request options (start position, mode, etc.)
 * @param classOfT target deserialization type
 * @return a {@link Flux} of change-feed pages
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Paged-flux entry point for change-feed queries: unwraps the change-feed options from
 * the operation state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}
/**
 * Reads all documents of a single logical partition by issuing a partition-scoped scan
 * query against the partition-key range that owns the given partition key.
 *
 * <p>Resolves the collection and routing map, computes the owning range from the
 * effective partition key, then runs the query pinned to that range. Diagnostics
 * registration differs depending on whether an availability strategy with multiple
 * applicable regions is in play.
 *
 * @param collectionLink the collection to read from; must be non-empty
 * @param partitionKey the logical partition to scan; must be non-null
 * @param state query/feed operation state
 * @param classOfT target deserialization type
 * @return a {@link Flux} of feed-response pages for the partition
 * @throws IllegalArgumentException if {@code collectionLink} is empty or
 *         {@code partitionKey} is null
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the options so per-call mutations (e.g. pinning the pk-range id below)
// don't leak into the caller's state.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
// With fewer than two applicable regions there is no speculation, so the factory is
// merge-only; otherwise it must also be resettable between speculative attempts.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx));
} else {
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query filtered to the logical partition ("SELECT * FROM c WHERE c[pk] = ...").
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries collection-recreate scenarios (same name, new rid).
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
// Pin the query to the single range that owns this partition key.
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
diagnosticsFactory,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculative execution: fold the scoped diagnostics back into the request options
// on every terminal path (next page, error, cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
/**
 * Returns the client-side cache of partitioned query execution plans, keyed by query
 * text/spec, used to skip gateway query-plan round trips.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
/**
 * Reads the partition-key ranges of a collection as a feed (state-based overload).
 *
 * @param collectionLink the collection whose ranges to read; must be non-empty
 * @param state query/feed operation state
 * @return a {@link Flux} of partition-key-range feed pages
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Reads the partition-key ranges of a collection as a feed (options-based overload).
 *
 * @param collectionLink the collection whose ranges to read; must be non-empty
 * @param options query request options
 * @return a {@link Flux} of partition-key-range feed pages
 * @throws IllegalArgumentException if {@code collectionLink} is empty
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Builds a service request targeting the stored-procedures feed of a collection.
 *
 * @param collectionLink the parent collection link; must be non-empty
 * @param storedProcedure the stored procedure payload; must be non-null and valid
 * @param options request options
 * @param operationType the operation (e.g. Create, Upsert) the request is for
 * @return the populated service request
 * @throws IllegalArgumentException if the link is empty or the stored procedure is null/invalid
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Builds a service request targeting the user-defined-functions feed of a collection.
 *
 * @param collectionLink the parent collection link; must be non-empty
 * @param udf the user-defined function payload; must be non-null and valid
 * @param options request options
 * @param operationType the operation (e.g. Create, Upsert) the request is for
 * @return the populated service request
 * @throws IllegalArgumentException if the link is empty or the UDF is null/invalid
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);

    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure in a collection.
 *
 * <p>Wires the internal implementation through the session-token-reset retry policy.
 *
 * @param collectionLink the parent collection link
 * @param storedProcedure the stored procedure to create
 * @param options request options
 * @return a {@link Mono} emitting the created stored procedure's resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and sends the Create request for a stored procedure.
 *
 * @param collectionLink the parent collection link
 * @param storedProcedure the stored procedure to create
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the resource response, or {@link Mono#error} on
 *         synchronous failures (invalid arguments/resource)
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());

        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline.
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure.
 *
 * <p>Wires the internal implementation through the session-token-reset retry policy.
 *
 * @param storedProcedure the stored procedure (with self-link) to replace
 * @param options request options
 * @return a {@link Mono} emitting the replaced stored procedure's resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and sends the Replace request for a stored procedure, addressed via the
 * procedure's self-link.
 *
 * @param storedProcedure the stored procedure to replace; must be non-null and valid
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the resource response, or {@link Mono#error} on
 *         synchronous failures
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);

        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline.
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure by link.
 *
 * <p>Wires the internal implementation through the session-token-reset retry policy.
 *
 * @param storedProcedureLink the link of the stored procedure to delete
 * @param options request options
 * @return a {@link Mono} emitting the deletion's resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
/**
 * Builds and sends the Delete request for a stored procedure.
 *
 * @param storedProcedureLink the link of the stored procedure to delete; must be non-empty
 * @param options request options
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @return a {@link Mono} emitting the resource response, or {@link Mono#error} on
 *         synchronous failures
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline.
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure by link.
 *
 * <p>Wires the internal implementation through the session-token-reset retry policy.
 *
 * @param storedProcedureLink the link of the stored procedure to read
 * @param options request options
 * @return a {@link Mono} emitting the stored procedure's resource response
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for a stored procedure and maps the raw
 * service response to a typed {@link ResourceResponse}.
 * Synchronous failures (e.g. an empty link) surface as an error Mono.
 *
 * @param storedProcedureLink link of the stored procedure; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the read response.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of stored procedures under the given collection.
 *
 * @param collectionLink link of the owning collection; must be non-empty.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of stored procedures.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures in a collection using a raw query string.
 *
 * @param collectionLink link of the owning collection.
 * @param query SQL query text.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching stored procedures.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures in a collection using a parameterized query spec.
 * Delegates directly to the generic query pipeline.
 *
 * @param collectionLink link of the owning collection.
 * @param querySpec parameterized SQL query.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching stored procedures.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with the given parameters, running the
 * operation under the session-token-reset retry policy.
 *
 * @param storedProcedureLink link to the stored procedure to execute.
 * @param options request options; may be null.
 * @param procedureParams parameters passed to the procedure; may be null or empty.
 * @return a Mono emitting the execution response.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request against a collection, running the
 * operation under the session-token-reset retry policy.
 *
 * @param collectionLink link of the target collection.
 * @param serverBatchRequest the batch of operations to execute.
 * @param options request options; may be null.
 * @param disableAutomaticIdGeneration whether to suppress automatic id generation.
 * @return a Mono emitting the parsed batch response.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Builds and issues the ExecuteJavaScript request for a stored procedure,
 * serializing {@code procedureParams} into the request body, resolving the
 * partition key, capturing the session token from the response, and mapping
 * it to a {@link StoredProcedureResponse}. Synchronous failures surface as
 * an error Mono.
 *
 * @param storedProcedureLink link of the stored procedure to execute.
 * @param options request options; may be null.
 * @param procedureParams procedure arguments; an empty body is sent when null/empty.
 * @param retryPolicy retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the execution response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Retry policy sees the request before partition-key resolution and send.
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer `request` rather than the emitted `req` —
// presumably addPartitionKeyInformation mutates and re-emits the same instance; confirm.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds and issues the document request for a server batch and parses the
 * service response into a {@link CosmosBatchResponse}. Synchronous failures
 * surface as an error Mono.
 *
 * @param collectionLink link of the target collection.
 * @param serverBatchRequest the batch of operations to execute.
 * @param options request options; may be null.
 * @param requestRetryPolicy retry policy driving the outer retry loop; may be null.
 * @param disableAutomaticIdGeneration whether to suppress automatic id generation.
 * @return a Mono emitting the parsed batch response.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
/**
 * Creates a trigger in the given collection, running the operation under the
 * session-token-reset retry policy.
 *
 * @param collectionLink link of the owning collection.
 * @param trigger the trigger to create.
 * @param options request options; may be null.
 * @return a Mono emitting the create response.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a trigger and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono.
 *
 * @param collectionLink link of the owning collection.
 * @param trigger the trigger to create; must be non-null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the create response.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting
 * the triggers feed of the given collection.
 *
 * @param collectionLink link of the owning collection; must be non-empty.
 * @param trigger the trigger payload; must be non-null and pass resource validation.
 * @param options request options; may be null.
 * @param operationType the operation (e.g. Create, Upsert) the request represents.
 * @return the assembled service request.
 * @throws IllegalArgumentException when the link is empty or the trigger is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
}
/**
 * Replaces an existing trigger, running the operation under the
 * session-token-reset retry policy.
 *
 * @param trigger the trigger with updated content; its self link identifies the target.
 * @param options request options; may be null.
 * @return a Mono emitting the replace response.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a trigger, addressed by the
 * trigger's self link, and maps the raw service response to a typed
 * {@link ResourceResponse}. Synchronous failures surface as an error Mono.
 *
 * @param trigger the trigger with updated content; must be non-null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the replace response.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the trigger identified by {@code triggerLink}, running the
 * operation under the session-token-reset retry policy.
 *
 * @param triggerLink link to the trigger to delete.
 * @param options request options; may be null.
 * @return a Mono emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a trigger and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono.
 *
 * @param triggerLink link of the trigger; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the delete response.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the trigger identified by {@code triggerLink}, running the operation
 * under the session-token-reset retry policy.
 *
 * @param triggerLink link to the trigger to read.
 * @param options request options; may be null.
 * @return a Mono emitting the read response.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a trigger and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono.
 *
 * @param triggerLink link of the trigger; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the read response.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of triggers under the given collection.
 *
 * @param collectionLink link of the owning collection; must be non-empty.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of triggers.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers in a collection using a raw query string.
 *
 * @param collectionLink link of the owning collection.
 * @param query SQL query text.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching triggers.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
/**
 * Queries triggers in a collection using a parameterized query spec.
 * Delegates directly to the generic query pipeline.
 *
 * @param collectionLink link of the owning collection.
 * @param querySpec parameterized SQL query.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching triggers.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in the given collection, running the
 * operation under the session-token-reset retry policy.
 *
 * @param collectionLink link of the owning collection.
 * @param udf the user-defined function to create.
 * @param options request options; may be null.
 * @return a Mono emitting the create response.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user-defined function and maps
 * the raw service response to a typed {@link ResourceResponse}. Synchronous
 * failures surface as an error Mono.
 *
 * @param collectionLink link of the owning collection.
 * @param udf the user-defined function to create; must be non-null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the create response.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces an existing user-defined function, running the operation under
 * the session-token-reset retry policy.
 *
 * @param udf the UDF with updated content; its self link identifies the target.
 * @param options request options; may be null.
 * @return a Mono emitting the replace response.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user-defined function,
 * addressed by the UDF's self link, and maps the raw service response to a
 * typed {@link ResourceResponse}. Synchronous failures surface as an error Mono.
 *
 * @param udf the UDF with updated content; must be non-null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the replace response.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user-defined function identified by {@code udfLink}, running
 * the operation under the session-token-reset retry policy.
 *
 * @param udfLink link to the UDF to delete.
 * @param options request options; may be null.
 * @return a Mono emitting the delete response.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a user-defined function and maps
 * the raw service response to a typed {@link ResourceResponse}. Synchronous
 * failures surface as an error Mono.
 *
 * @param udfLink link of the UDF; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the delete response.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user-defined function identified by {@code udfLink}, running the
 * operation under the session-token-reset retry policy.
 *
 * @param udfLink link to the UDF to read.
 * @param options request options; may be null.
 * @return a Mono emitting the read response.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user-defined function and maps
 * the raw service response to a typed {@link ResourceResponse}. Synchronous
 * failures surface as an error Mono.
 *
 * @param udfLink link of the UDF; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the read response.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of user-defined functions under the given collection.
 *
 * @param collectionLink link of the owning collection; must be non-empty.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of user-defined functions.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/**
 * Queries user-defined functions in a collection using a raw query string.
 *
 * @param collectionLink link of the owning collection.
 * @param query SQL query text.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching user-defined functions.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
/**
 * Queries user-defined functions in a collection using a parameterized query
 * spec. Delegates directly to the generic query pipeline.
 *
 * @param collectionLink link of the owning collection.
 * @param querySpec parameterized SQL query.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching user-defined functions.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads the conflict identified by {@code conflictLink}, running the
 * operation under the session-token-reset retry policy.
 *
 * @param conflictLink link to the conflict to read.
 * @param options request options; may be null.
 * @return a Mono emitting the read response.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a conflict resource, resolving
 * partition-key information first, and maps the raw service response to a
 * typed {@link ResourceResponse}. Unlike most sibling operations, the retry
 * policy's onBeforeSendRequest hook runs inside the flatMap, i.e. after
 * partition-key resolution. Synchronous failures surface as an error Mono.
 *
 * @param conflictLink link of the conflict; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the read response.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer `request` rather than the emitted `req` —
// presumably addPartitionKeyInformation mutates and re-emits the same instance; confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of conflicts under the given collection.
 *
 * @param collectionLink link of the owning collection; must be non-empty.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of conflicts.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
/**
 * Queries conflicts in a collection using a raw query string.
 *
 * @param collectionLink link of the owning collection.
 * @param query SQL query text.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching conflicts.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
/**
 * Queries conflicts in a collection using a parameterized query spec.
 * Delegates directly to the generic query pipeline.
 *
 * @param collectionLink link of the owning collection.
 * @param querySpec parameterized SQL query.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching conflicts.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes the conflict identified by {@code conflictLink}, running the
 * operation under the session-token-reset retry policy.
 *
 * @param conflictLink link to the conflict to delete.
 * @param options request options; may be null.
 * @return a Mono emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a conflict resource, resolving
 * partition-key information first, and maps the raw service response to a
 * typed {@link ResourceResponse}. As in readConflictInternal, the retry
 * policy's onBeforeSendRequest hook runs inside the flatMap, i.e. after
 * partition-key resolution. Synchronous failures surface as an error Mono.
 *
 * @param conflictLink link of the conflict; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the delete response.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda uses the outer `request` rather than the emitted `req` —
// presumably addPartitionKeyInformation mutates and re-emits the same instance; confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a user in the given database, running the operation under the
 * session-token-reset retry policy.
 *
 * @param databaseLink link of the owning database.
 * @param user the user to create.
 * @param options request options; may be null.
 * @return a Mono emitting the create response.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono. Unlike the upsert path, no onBeforeSendRequest hook is
 * invoked here.
 *
 * @param databaseLink link of the owning database.
 * @param user the user to create; must be non-null.
 * @param options request options; may be null.
 * @param documentClientRetryPolicy retry policy driving the outer retry loop.
 * @return a Mono emitting the create response.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Upserts a user in the given database, running the operation under the
 * session-token-reset retry policy.
 *
 * @param databaseLink link of the owning database.
 * @param user the user to upsert.
 * @param options request options; may be null.
 * @return a Mono emitting the upsert response.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a user and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono.
 *
 * @param databaseLink link of the owning database.
 * @param user the user to upsert; must be non-null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the upsert response.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting
 * the users feed of the given database.
 *
 * @param databaseLink link of the owning database; must be non-empty.
 * @param user the user payload; must be non-null and pass resource validation.
 * @param options request options; may be null.
 * @param operationType the operation (e.g. Create, Upsert) the request represents.
 * @return the assembled service request.
 * @throws IllegalArgumentException when the link is empty or the user is null.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
}
/**
 * Replaces an existing user, running the operation under the
 * session-token-reset retry policy.
 *
 * @param user the user with updated content; its self link identifies the target.
 * @param options request options; may be null.
 * @return a Mono emitting the replace response.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user, addressed by the user's
 * self link, and maps the raw service response to a typed
 * {@link ResourceResponse}. Synchronous failures surface as an error Mono.
 *
 * @param user the user with updated content; must be non-null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the replace response.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the user identified by {@code userLink}, running the operation
 * under the session-token-reset retry policy.
 *
 * @param userLink link to the user to delete.
 * @param options request options; may be null.
 * @return a Mono emitting the delete response.
 */
// NOTE(review): added @Override for consistency — every sibling user operation
// (createUser, upsertUser, replaceUser, readUser) carries it; confirm deleteUser
// is declared on the implemented interface.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicyInstance),
        retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a user and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono.
 *
 * @param userLink link of the user; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the delete response.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the user identified by {@code userLink}, running the operation under
 * the session-token-reset retry policy.
 *
 * @param userLink link to the user to read.
 * @param options request options; may be null.
 * @return a Mono emitting the read response.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user and maps the raw service
 * response to a typed {@link ResourceResponse}. Synchronous failures surface
 * as an error Mono.
 *
 * @param userLink link of the user; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the read response.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of users under the given database.
 *
 * @param databaseLink link of the owning database; must be non-empty.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of users.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedPath);
}
/**
 * Queries users in a database using a raw query string.
 *
 * @param databaseLink link of the owning database.
 * @param query SQL query text.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching users.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
/**
 * Queries users in a database using a parameterized query spec.
 * Delegates directly to the generic query pipeline.
 *
 * @param databaseLink link of the owning database.
 * @param querySpec parameterized SQL query.
 * @param state feed operation state for paging/diagnostics.
 * @return a Flux of feed pages of matching users.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads the client encryption key identified by {@code clientEncryptionKeyLink},
 * running the operation under the session-token-reset retry policy.
 *
 * @param clientEncryptionKeyLink link to the client encryption key to read.
 * @param options request options; may be null.
 * @return a Mono emitting the read response.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a client encryption key and maps
 * the raw service response to a typed {@link ResourceResponse}. Synchronous
 * failures surface as an error Mono.
 *
 * @param clientEncryptionKeyLink link of the client encryption key; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy driving the outer retry loop; may be null.
 * @return a Mono emitting the read response.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
// Let the retry policy inspect/annotate the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a client encryption key in the given database, running the
 * operation under the session-token-reset retry policy.
 *
 * @param databaseLink link of the owning database.
 * @param clientEncryptionKey the client encryption key to create.
 * @param options request options; may be null.
 * @return a Mono emitting the create response.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
// Creates a client encryption key under the given database link and maps the raw
// response into a typed ResourceResponse.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        final RxDocumentServiceRequest serviceRequest =
            getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(serviceRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(rsp -> toResourceResponse(rsp, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds a service request targeting the client-encryption-key feed of the given database.
// Both the database link and the key resource are required; the resource is validated
// before the request is assembled.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    final String requestPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, requestPath, clientEncryptionKey, headers, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    // Same policy instance for the callback and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing client encryption key identified by its name-based link.
// Validates the resource, builds a Replace request, primes the retry policy, and
// maps the raw response into a typed ResourceResponse.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        if (retryPolicyInstance != null) {
            // Let the retry policy observe the outgoing request before it is sent.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono so callers see a uniform reactive contract.
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Client encryption keys are metadata; route through the non-document read-feed path.
    final String feedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedLink);
}
// Queries client encryption keys of a database with an arbitrary SQL query spec;
// delegates to the generic query pipeline.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    // Creates a permission under the given user link.
    // FIX: reuse the SAME retry-policy instance for both the operation callback and the
    // retry wrapper. Previously a second, unrelated policy instance was passed to
    // inlineIfPossibleAsObs, so retry decisions were made against state the request
    // callback never primed — every sibling method (upsert/replace/delete/read) shares
    // one instance.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Creates a permission under the given user link and maps the raw response into
// a typed ResourceResponse.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        final RxDocumentServiceRequest serviceRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Create);
        return this.create(serviceRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(rsp -> toResourceResponse(rsp, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    // One retry-policy instance is shared between the callback and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Upserts (create-or-replace) a permission under the given user link.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            // Let the retry policy observe the outgoing request before it is sent.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds a service request targeting the permissions feed of the given user.
// Both the user link and the permission resource are required; the resource is
// validated before the request is assembled.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String requestPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, requestPath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Shared policy instance for callback and retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing permission, addressed through its self link.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy observe the outgoing request before it is sent.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Shared policy instance for callback and retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a permission by its link.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy observe the outgoing request before it is sent.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Shared policy instance for callback and retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Reads a permission by its link.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy observe the outgoing request before it is sent.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    // Permissions are metadata; route through the non-document read-feed path.
    final String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, feedLink);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, state);
}
// Queries permissions of a user with an arbitrary SQL query spec; delegates to the
// generic query pipeline.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Shared policy instance for callback and retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
// Replaces an offer (throughput settings), addressed through its self link.
// Offers carry no request headers/options on replace.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Shared policy instance for callback and retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Reads an offer by its link. Offers take no request headers/options on read.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            // Let the retry policy observe the outgoing request before it is sent.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    // Offers live under the account-level offers feed.
    final String feedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, feedLink);
}
// Unwraps the operation state into plain query options and delegates to the
// options-based overload.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
// Wraps a metadata (non-document) read-feed in the session-token-reset retry policy.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final DocumentClientRetryPolicy sessionRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, sessionRetryPolicy),
        sessionRetryPolicy);
}
// Drives a paginated ReadFeed over a metadata resource (never documents):
// builds one request per page (propagating the continuation token), executes it,
// and maps each raw response into a typed FeedResponse page.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {
    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 signals "service default page size" when the caller did not set a max item count.
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    assert(resourceType != ResourceType.Document);
    // Per-page request factory: carries the continuation token and page size as headers.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    // Per-page executor: performs the read and converts the payload into typed items.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));
    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, state);
}
// Queries account offers; offers are account-scoped, hence the null parent link.
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Shared policy instance for callback and retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
// Reads the account-level DatabaseAccount resource (empty path, no headers).
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        // Synchronous failures become an error Mono for a uniform reactive contract.
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Returns the session container used for session-consistency bookkeeping.
public Object getSession() {
    return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer instance
// (the cast fails otherwise).
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
// Exposes the collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
// Exposes the partition-key-range metadata cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
// Exposes the global endpoint manager (regional endpoint resolution).
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}
// Builds a fresh AddressSelector over the client's address resolver and configured protocol.
@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the DatabaseAccount resource from a specific endpoint (used for per-region probing).
// Side effect: refreshes the useMultipleWriteLocations flag from the returned account
// combined with the client's connection policy.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    // Deferred so the request is built per subscription.
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Force the request to the supplied endpoint instead of the resolved one.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the service request being dispatched
 * @return the store model that should handle the request: the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request gateway override always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Resource types that are always gateway-routed (plus scripts, except JS execution,
    // and partition-key delete).
    if (resourceType == ResourceType.Offer ||
        resourceType == ResourceType.ClientEncryptionKey ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }
    // Metadata mutations go through the gateway; data-plane mutations go direct.
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Cross-partition queries/feeds (no PK range identity and no PK header) need the
        // gateway; everything else goes direct.
        if ((operationType == OperationType.Query ||
            operationType == OperationType.SqlQuery ||
            operationType == OperationType.ReadFeed) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}
// Shuts the client down exactly once (idempotent via the `closed` flag): releases the
// endpoint manager, store client factory, HTTP client, CPU monitor registration, and —
// if enabled — the throughput control store. Subsequent calls only log a warning.
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
// Exposes the configured item deserializer.
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
// Registers a throughput control group. The backing ThroughputControlStore is created
// lazily on the first call (guarded by a compareAndSet so initialization happens once)
// and wired into either the direct store model or the gateway proxy depending on the
// connection mode. The method is synchronized; the CAS additionally protects the flag.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up and cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Returns the account's default consistency level as read by the gateway configuration reader.
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
* Configure fault injector provider.
*
* @param injectorProvider the fault injector provider.
*/
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    // Direct mode injects faults into both the store model and the address resolver;
    // the gateway proxy is configured in every mode.
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Forwards the warm-up-completed signal to the store model.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
// Forwards the warm-up-started signal to the store model.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Returns the credential this client was configured with (master key or resource token).
@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}
// Builds the parameterized query "SELECT * FROM c WHERE c<pkSelector> = @pkValue"
// used to scan a single logical partition; the partition-key value is passed as a
// query parameter rather than inlined into the text.
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    final String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the feed ranges (one per physical partition) of a container, wrapped in a
// retry policy that recovers from stale-partition (InvalidPartitionException) errors.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink, forceRefresh),
        invalidPartitionExceptionRetryPolicy);
}
// Resolves the container, then asks the partition-key-range cache for all overlapping
// ranges across the full key space and converts them into feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(
    RxDocumentServiceRequest request,
    String collectionLink,
    boolean forceRefresh) {
    logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        // Query the cache over the full key range; forceRefresh bypasses cached entries.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(),
                RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
                forceRefresh,
                null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
// Converts a cached partition-key-range list into EPK-based feed ranges. A null list
// means the name cache is stale: flag a refresh on the request and raise
// InvalidPartitionException so the surrounding retry policy can recover.
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range as a FeedRange.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
* Creates a type 4 (pseudo randomly generated) UUID.
* <p>
* The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
*
* @return A randomly generated {@link UUID}.
*/
public static UUID randomUuid() {
    // Draw both 64-bit words from the same thread-local PRNG instance.
    final ThreadLocalRandom rnd = ThreadLocalRandom.current();
    return randomUuid(rnd.nextLong(), rnd.nextLong());
}
// Stamps the RFC 4122 version-4 and IETF-variant bits onto the supplied random words
// and materializes the result as a UUID.
static UUID randomUuid(long msb, long lsb) {
    // Clear the 4-bit version field (bits 12-15 of the high word), then set version 4.
    final long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the top two bits of the low word, then set the IETF variant (10xx...).
    final long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: uses this client as the diagnostics factory for the
// availability-strategy wrapper below.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
// Executes a document point operation with the threshold-based availability strategy:
// the operation is started in the first applicable region and, after a growing threshold
// delay, hedged speculatively in each additional region with all other applicable regions
// excluded. The first NON-TRANSIENT result (success or non-transient error) wins; diagnostics
// from all attempts are merged back into the caller-visible request options.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");
    final RequestOptions nonNullRequestOptions =
        initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
    checkArgument(
        resourceType == ResourceType.Document,
        "This method can only be used for document point operations.");
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        idempotentWriteRetriesEnabled,
        nonNullRequestOptions);
    // Fewer than two applicable regions means hedging cannot help — run the plain operation.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
            if (monoList.isEmpty()) {
                // First attempt: no region exclusions, may fail over across all regions.
                // Any CosmosException (transient or not) is treated as a candidate result here.
                Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin to this region by excluding all other applicable regions.
                clonedOptions.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        nonNullRequestOptions.getExcludeRegions(),
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                // Only NON-transient CosmosExceptions short-circuit the race from a hedge.
                Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger each hedge: threshold + (step * index-of-hedge).
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    // First attempt to emit a value wins; losers are cancelled by firstWithValue.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            diagnosticsFactory.merge(nonNullRequestOptions);
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            // firstWithValue signals NoSuchElementException when ALL sources completed
            // without a value; surface the first inner CosmosException instead.
            Throwable exception = Exceptions.unwrap(throwable);
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                        diagnosticsFactory.merge(nonNullRequestOptions);
                        return cosmosException;
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            diagnosticsFactory.merge(nonNullRequestOptions);
            return exception;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// Predicate for onErrorResume: true when the unwrapped failure is any CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// Predicate for onErrorResume in the hedging pipeline: true only when the failure is a
// CosmosException whose status/sub-status marks it as non-transient (i.e. retrying the
// operation in another region cannot change the outcome).
private static boolean isNonTransientCosmosException(Throwable t) {
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosError = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(cosmosError.getStatusCode(), cosmosError.getSubStatusCode());
    }
    return false;
}
// Builds the exclusion list for a hedged request: the caller's original exclusions plus
// every other applicable region, so the regional attempt stays pinned to 'currentRegion'.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    List<String> effectiveExclusions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();

    for (String candidate : applicableRegions) {
        if (!candidate.equals(currentRegion)) {
            effectiveExclusions.add(candidate);
        }
    }

    return effectiveExclusions;
}
// Classifies a status/sub-status pair as "non-transient" for hedging purposes: a success
// (<400) or one of the deterministic 4xx outcomes that another region would reproduce.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    return statusCode < HttpConstants.StatusCodes.BADREQUEST
        || (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        || statusCode == HttpConstants.StatusCodes.BADREQUEST
        || statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
        || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
        || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
        || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED
        || (statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN);
}
// Prefers an explicit per-call diagnostics context; falls back to this client instance.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Resolves the candidate regional endpoints for an operation, honoring the
 * caller-supplied excluded regions and the client's preference ordering.
 *
 * @param operationType the operation being executed; read vs. write selects the endpoint set
 * @param excludedRegions regions to skip, may be null
 * @return the applicable endpoints; empty when the operation is neither read nor write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    }
    if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }
    return EMPTY_ENDPOINT_LIST;
}
/**
 * Strips null entries from the given endpoint list in place.
 *
 * @param orderedEffectiveEndpointsList the list to sanitize; may be null
 * @return the same list instance with nulls removed, or the shared empty list when input is null
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // removeIf avoids the O(n^2) element-shifting cost of the previous repeated
    // index-based remove() loop on ArrayList, and is the idiomatic form.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload that extracts the excluded-region list from the request options
// before delegating to the main implementation.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {

    return this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
// Decides whether cross-region "speculation" (hedging) applies to this operation and, if
// so, returns the ordered list of region names to hedge across. An empty list disables
// hedging entirely.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
// Hedging requires an enabled end-to-end latency policy.
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
// Only document (item) operations are hedged.
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
// Writes are only hedged when the caller opted into idempotent write retries.
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
// Writes additionally require multi-write-location support.
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
// Only the threshold-based strategy carries the timing parameters hedging needs.
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Region-name comparison is case-insensitive; normalize exclusions to lower case once.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
// Preserve the endpoint preference order while dropping excluded regions.
// NOTE(review): assumes getRegionName(...) never returns null for an applicable endpoint —
// confirm; otherwise the toLowerCase call below can throw NPE.
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
/**
 * Wraps a feed/query operation with the threshold-based availability strategy ("hedging"):
 * the operation starts in the first applicable region immediately and is speculatively
 * re-issued against the remaining regions after staggered delays. The first region to
 * produce a value wins the race; non-transient failures are wrapped as values so they can
 * win too, while transient failures keep the race open.
 *
 * @param resourceType must be Document (asserted below)
 * @param operationType the feed operation being executed
 * @param retryPolicyFactory factory for per-attempt retry policies
 * @param req the service request; cloned per hedged region
 * @param feedOperation the actual operation to execute per region
 * @return the winning region's result, or the first CosmosException when all regions fail
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Hedging only makes sense with at least two candidate regions; otherwise run normally.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// First (primary) attempt: subscribes immediately and keeps its normal
// cross-region retry behavior; ANY CosmosException ends this attempt as a value.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: all other applicable regions are excluded so the retry
// policy stays local to 'region'; only non-transient errors end the race.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold + (attemptIndex - 1) * thresholdStep before subscribing.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// Race the attempts; unwrap the winner. Non-transient failures were carried as values
// and are re-raised as errors here.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no source produced a value;
// surface the first underlying CosmosException among the per-region failures.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Abstraction over a single document point operation (create/read/replace/delete/patch)
// so the availability-strategy wrapper can re-invoke it per region with adjusted options.
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Result wrapper for the point-operation hedging race: exactly one of {response, exception}
// is set. Non-transient failures are carried as *values* so the cross-region race
// (Mono.firstWithValue) can treat them as a final outcome; the caller re-raises them via
// isError()/exception. Fields are read directly by the enclosing class — keep their names.
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    // Failure outcome: a non-transient CosmosException.
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    // Success outcome.
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public boolean isError() {
        return this.exception != null;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }

    public CosmosException getException() {
        return this.exception;
    }
}
// Generic result wrapper for the feed-operation hedging race: exactly one of
// {response, exception} is set. Mirrors NonTransientPointOperationResult but carries an
// arbitrary feed result type. Fields are read directly by the enclosing class.
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    // Failure outcome: a non-transient CosmosException.
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    // Success outcome.
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public boolean isError() {
        return this.exception != null;
    }

    public T getResponse() {
        return this.response;
    }

    public CosmosException getException() {
        return this.exception;
    }
}
// Diagnostics factory scoped to one logical operation (e.g. one hedged request): records
// every CosmosDiagnostics it creates so that, after the operation settles, all of them can
// be merged into a single diagnostics context exactly once.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
// Guards merge(): flipped via CAS so the merge runs at most once per scope.
private final AtomicBoolean isMerged = new AtomicBoolean(false);
// The real client context all creation calls are delegated to.
private final DiagnosticsClientContext inner;
// Every diagnostics instance created within this scope, in creation order.
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
private final boolean shouldCaptureAllFeedDiagnostics;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
// Delegates creation to the inner context but tracks the instance for later merging.
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return this.mostRecentlyCreatedDiagnostics.get();
}
// Merge using the diagnostics context snapshot from the request options when available.
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
// Attaches all tracked context-less diagnostics to a single target context. Idempotent:
// the CAS below makes only the first caller perform the merge.
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
// Target context: the caller-provided one, else the first tracked diagnostics that
// already carries a context.
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
// Nothing to merge into — nothing tracked has a context and none was provided.
if (ctx == null) {
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
// Mark feed diagnostics as captured so the paged-flux pipeline keeps them.
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
// Clears tracked diagnostics and re-arms the merge gate so the factory can be reused.
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} |
// NOTE(review): the first and last lines of this block are fused with dataset/table
// delimiters ("Fixed |", "| logger.info( |") — extraction artifacts; this region does not
// compile as-is and the delimiters must be stripped when restoring the source.
// Second-phase initialization: wires the gateway proxy, caches, telemetry and the
// store model after construction; must run before the client serves requests.
Fixed | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Warm-start the collection cache from a serialized snapshot when one is provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
// NOTE(review): the log message below has 5 '{}' placeholders but 6 arguments are
// passed — SLF4J silently drops the trailing 'configs.getProtocol()'. Either add a
// 'protocol [{}]' placeholder or remove the extra argument.
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel,
configs.getProtocol());
}).subscribe();
// Gateway mode reuses the gateway proxy as the store model; direct mode builds its own.
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// Session capturing can be disabled once the account's effective consistency is known.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | logger.info( | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel);
}).subscribe();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
// Public constructor without a TokenCredential: delegates to the permission-feed
// constructor with tokenCredential = null, then installs the token resolver. The resolver
// is assigned AFTER delegation on purpose — the delegated constructor does not need it.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Public constructor accepting both an AzureKeyCredential and a TokenCredential:
// delegates to the permission-feed constructor, then installs the token resolver.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor handling resource-token (permission feed) authentication.
 * Delegates core construction first, then — when a non-empty permission feed is given —
 * builds {@code resourceTokensMap}: resource id/full-name → (partition key, token) pairs.
 *
 * @throws IllegalArgumentException when a permission's resource link is empty or unparsable,
 *         or when the feed yields no usable tokens
 */
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && !permissionFeed.isEmpty()) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// computeIfAbsent replaces the previous manual get / null-check / put sequence.
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if (this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Remember the first real resource token as a fallback credential.
String firstToken = permissionFeed.get(0).getToken();
if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
// Core (package-private) constructor: sets up credentials, connection policy, diagnostics
// config, session container and the retry machinery. Network-facing initialization is
// deferred to init().
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Bookkeeping shared across all client instances in this process.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
// NOTE(review): the message below has 4 '{}' placeholders but 5 arguments are passed —
// SLF4J silently drops the trailing 'configs.getProtocol()'. Either add a
// 'protocol [{}]' placeholder or remove the extra argument.
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Credential resolution precedence: explicit key credential > resource token >
// raw master key (wrapped as key credential) > AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): this string literal appears truncated in this snapshot
// (unterminated after ":"); likely lost during extraction — restore the full
// AAD scope (e.g. "<scheme>://<host>/.default") before compiling.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session capturing is only needed for SESSION consistency unless explicitly forced.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Undo the process-wide bookkeeping done above before propagating.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
// Returns the diagnostics configuration captured for this client instance.
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
// Creates a new CosmosDiagnostics instance (sampled per the telemetry config) and
// remembers it as the most recently created diagnostics for this client.
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics =
diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
this.mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
// Initializes the gateway service configuration reader and fails fast when the
// global endpoint manager has not been able to obtain a database account yet.
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
// A null database account means initial endpoint discovery/refresh failed.
if (databaseAccount == null) {
Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
if (databaseRefreshErrorSnapshot != null) {
// Surface the captured refresh error as the cause of the initialization failure.
// NOTE(review): the message literals below appear truncated in this extract
// (text after "https:" is cut off) - confirm against the original file.
logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot
);
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot);
} else {
// No refresh error was recorded; fail with a generic initialization message.
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
}
}
// Multi-write is only enabled when both the connection policy and the account allow it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Wires the gateway proxy with the caches and settings that only become available
// after client initialization (config reader, collection/partition caches, multi-write flag).
private void updateGatewayProxy() {
(this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
(this.gatewayProxy).setCollectionCache(this.collectionCache);
(this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
(this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes this client's collection cache into the given metadata snapshot.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
// Sets up direct (TCP) connectivity: creates the global address resolver, the store
// client factory, and finally the server store model used for direct-mode requests.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
// Build the ServerStoreModel on top of the factory created above.
this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface by delegating
// endpoint, account retrieval and connection-policy lookups back to this instance.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; overridable in tests to inject a fake proxy.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client for this client instance. When connection sharing
 * across clients is enabled, a process-wide shared client is returned; otherwise a
 * dedicated fixed client is created and its configuration is recorded in diagnostics.
 */
private HttpClient httpClient() {
    // Derive the HTTP client configuration from the connection policy settings.
    final HttpClientConfig gatewayHttpConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (connectionSharingAcrossClientsEnabled) {
        // Reuse a single shared gateway HTTP client across client instances.
        return SharedGatewayHttpClient.getOrCreateInstance(gatewayHttpConfig, diagnosticsClientConfig);
    }

    // Dedicated client: record its configuration in the diagnostics config first.
    diagnosticsClientConfig.withGatewayHttpClientConfig(gatewayHttpConfig.toDiagnosticsString());
    return HttpClient.createFixed(gatewayHttpConfig);
}
// Creates the StoreClient via the factory and wraps it in a ServerStoreModel.
// NOTE(review): the subscribeRntbdStatus parameter is not used in this visible body.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
// Returns the service endpoint URI this client was created with.
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
// Returns the effective connection policy.
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
// Indicates whether write operations return the full resource payload by default.
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
// Returns the client-level consistency level.
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
// Returns the client telemetry component.
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
// Returns the correlation id identifying this client in diagnostics.
@Override
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
// Returns the machine id from the diagnostics config, or null when no config exists.
@Override
public String getMachineId() {
if (this.diagnosticsClientConfig == null) {
return null;
}
return ClientTelemetry.getMachineId(diagnosticsClientConfig);
}
// Returns the full user agent string used for requests.
@Override
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
// Returns the diagnostics instance most recently created via createDiagnostics().
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return mostRecentlyCreatedDiagnostics.get();
}
/**
 * Creates a database resource.
 *
 * @param database the database definition to create.
 * @param options  request options; may be null.
 * @return a Mono emitting the resource response of the created database.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    // Obtain a fresh per-operation retry policy and run the create through it.
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Core implementation of createDatabase: validates and serializes the definition,
// builds the Create request (capturing serialization diagnostics), and maps the response.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Measure serialization time so it can be reported in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the database identified by the given link.
 *
 * @param databaseLink link of the database to delete.
 * @param options      request options; may be null.
 * @return a Mono emitting the resource response of the deleted database.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // Obtain a fresh per-operation retry policy and run the delete through it.
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Core implementation of deleteDatabase: builds the Delete request and maps the response.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the database identified by the given link.
 *
 * @param databaseLink link of the database to read.
 * @param options      request options; may be null.
 * @return a Mono emitting the resource response of the database.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    // Obtain a fresh per-operation retry policy and run the read through it.
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Core implementation of readDatabase: builds the Read request and maps the response.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads all databases in the account as a feed.
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link plus a target resource type to the feed link used for
// querying that resource type (e.g. a database link + Document -> ".../docs").
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case ClientEncryptionKey:
return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
// Any other resource type cannot be queried through this client path.
throw new IllegalArgumentException("resource type not supported");
}
}
/**
 * Extracts the operation context/listener tuple from query request options, or null
 * when no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}

/**
 * Extracts the operation context/listener tuple from request options, or null when
 * no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload using this client as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Builds a query Flux for the given resource type: resolves the feed link, picks or
// generates a correlation activity id, wraps execution in an invalid-partition retry
// policy, and merges scoped diagnostics back into the operation state on every
// terminal signal (success, error, cancellation).
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Prefer a caller-provided correlation activity id; otherwise create a random one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
// Scoped factory lets diagnostics be reset/merged per operation attempt.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
// Merge diagnostics into the context snapshot for each emitted page.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
// Also merge diagnostics when the query fails.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Creates the query execution context and turns it into a Flux of feed responses,
// attaching query-plan info to the first page and applying the end-to-end timeout
// policy when one is configured.
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
// Used to attach query-plan diagnostics only to the first emitted page.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
// Only pipelined contexts expose query info (select value, query plan, etc.).
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
// Apply the end-to-end operation timeout only when the policy is enabled.
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsClientContext);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Attaches the most relevant diagnostics to a query exception. When a diagnostics
 * instance was already created for this operation it is attached directly; otherwise
 * the diagnostics tracked for cancelled requests (if any) are merged pairwise into a
 * single aggregated instance and attached.
 *
 * @param requestOptions           the query request options carrying the cancelled-request tracker.
 * @param exception                the exception to decorate with diagnostics.
 * @param diagnosticsClientContext the context holding the most recently created diagnostics.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);

        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregatedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);

                        // BUGFIX: this previously read the statistics from 'first' a second
                        // time, so 'first' was merged with itself and the statistics of
                        // 'toBeMerged' were silently dropped from the aggregate.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);

                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get();

            BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
        }
    }
}
// Wraps a feed-response Flux with the configured end-to-end operation timeout and
// converts Reactor TimeoutExceptions into Cosmos cancellation exceptions carrying the
// merged diagnostics. A negative timeout yields a dedicated negative-timeout exception.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout,
DiagnosticsClientContext diagnosticsClientContext) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
// Negative timeout: fail immediately with the dedicated negative-timeout exception.
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
cancellationException.setStackTrace(throwable.getStackTrace());
// Record the cancellation so in-flight request diagnostics get tracked.
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(
requestOptions, cancellationException, diagnosticsClientContext);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
// Record the cancellation so in-flight request diagnostics get tracked.
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
return exception;
}
return throwable;
});
}
// Queries databases using a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
return queryDatabases(new SqlQuerySpec(query), state);
}
// Queries databases at the account root using a parameterized query spec.
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a document collection inside the given database.
 *
 * @param databaseLink link of the parent database.
 * @param collection   the collection definition to create.
 * @param options      request options; may be null.
 * @return a Mono emitting the resource response of the created collection.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
    // Obtain a fresh per-operation retry policy and run the create through it.
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Core implementation of createCollection: validates and serializes the definition,
// builds the Create request (capturing serialization diagnostics), maps the response,
// and records the returned session token for the new collection.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Measure serialization time so it can be reported in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Track the session token for the newly created collection.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Replaces an existing document collection; delegates to replaceCollectionInternal
// through a fresh per-operation retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of replaceCollection: validates and serializes the definition,
// builds the Replace request (capturing serialization diagnostics), maps the response,
// and refreshes the session token for the replaced collection.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Measure serialization time so it can be reported in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Unlike create, the replaced resource may be absent; guard before tracking it.
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes a document collection; delegates to deleteCollectionInternal through a
// fresh per-operation retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of deleteCollection: builds the Delete request and maps the response.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Sends a DELETE request through the store proxy after populating the standard headers,
 * updating the retry context end time when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // On retries, stamp the retry context with the time the retry actually started.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}

/**
 * Sends the POST request that deletes all items of a partition key, updating the
 * retry context end time when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            // On retries, stamp the retry context with the time the retry actually started.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends a GET request through the store proxy after populating the standard headers,
 * updating the retry context end time when this is a retried attempt.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // On retries, stamp the retry context with the time the retry actually started.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}

/**
 * Sends a GET feed request through the store proxy after populating the standard headers.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}

/**
 * Sends a POST query request through the store proxy, capturing the session token
 * from the response before it is handed back to the caller.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> this.getStoreProxy(populatedRequest)
            .processMessage(populatedRequest)
            .map(response -> {
                // Keep the session container up to date with the returned token.
                this.captureSessionToken(populatedRequest, response);
                return response;
            }));
}
// Reads a document collection; delegates to readCollectionInternal through a fresh
// per-operation retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of readCollection: builds the Read request and maps the response.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
// Synchronous failures are converted into an error Mono instead of throwing.
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads all collections in the given database as a feed.
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
// Queries collections using a raw query string; delegates to createQuery.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
QueryFeedOperationState state) {
return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Queries collections using a parameterized query spec.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string, e.g. ["a",1,{...}].
 * JsonSerializable values use the model bridge serializer; everything else goes
 * through the shared Jackson mapper.
 *
 * @param objectArray the parameter values to serialize.
 * @return the parameters rendered as a JSON array string.
 * @throws IllegalArgumentException when a value cannot be serialized to JSON.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final String[] serializedParams = new String[objectArray.size()];
    for (int index = 0; index < objectArray.size(); ++index) {
        final Object param = objectArray.get(index);
        if (param instanceof JsonSerializable) {
            serializedParams[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serializedParams[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return "[" + StringUtils.join(serializedParams, ",") + "]";
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Assembles the per-request HTTP headers from client-level defaults plus the supplied
 * {@link RequestOptions}. Custom headers from the options are applied first, so typed
 * option values set later in this method win on conflicts.
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    // Multi-write accounts opt into tentative writes on every request.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    // Client-level consistency default; overridden below if the options carry their own level.
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the content-response-on-write preference can apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Raw caller-supplied headers first; typed option values below take precedence.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request override of the client-level content-response-on-write flag.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    // Suppress the response payload on document writes when content-on-write is disabled.
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency preconditions.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Trigger include lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Legacy throughput: an explicit numeric offer wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties-based offers are only considered when no legacy numeric offer is set.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    // Dedicated (integrated cache) gateway knobs: staleness bound and per-request cache bypass.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
/** Returns the retry-policy factory that resets session tokens on session-read failures. */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    // Resolve the target collection (its partition key definition is required), then stamp
    // the partition key header onto the request via the shared overload.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionObs);
}
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    // Once the collection (and therefore its partition key definition) is known, stamp the
    // partition key onto the request and hand the same request instance downstream.
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Computes the effective partition key for the request and sets both the internal partition
 * key and the PARTITION_KEY header. Precedence (first match wins):
 *   1. options carry PartitionKey.NONE -> the collection's "none" partition key
 *   2. options carry an explicit partition key -> that key
 *   3. the collection has no partition key definition (or no paths) -> the empty key
 *   4. a document body is available -> extract the key from the document
 *   5. otherwise -> UnsupportedOperationException (a key must be supplied)
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Migration/legacy path: collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Normalize the document payload into an InternalObjectNode so the key can be extracted.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // Rewind: the buffer may already have been consumed by serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // The extraction cost is recorded in the request's serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the service request for a document create/upsert: serializes the document body
 * (recording serialization diagnostics), attaches headers and retry/E2E-timeout hooks, and
 * resolves the partition key asynchronously.
 *
 * @return a Mono that emits the fully prepared request once the collection is resolved.
 * @throws IllegalArgumentException if the collection link is empty or the document is null.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType,
                                                                DiagnosticsClientContext clientContextOverride) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Serialize the payload up front and time it for the serialization diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        // Wire the end-to-end timeout hook so a client-side timeout can mark this request cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Partition key resolution needs the collection metadata, hence the async tail.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized batch body,
 * records serialization diagnostics, wires E2E-timeout/exclude-region hooks, and resolves the
 * collection to attach batch routing headers.
 *
 * Fix: the original set {@code excludeRegions} twice (once inside the first
 * {@code options != null} block and again in a redundant second block); the duplicate has
 * been removed — behavior is unchanged since the same value was written both times.
 *
 * @return a Mono that emits the fully prepared batch request once the collection is resolved.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; only wrapping it is timed here.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Wire the end-to-end timeout hook so a client-side timeout can mark this request cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Batch routing headers require the collection's partition key definition.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches batch routing and batch control headers to the request. Routing is either by an
 * explicit partition key value or by a partition key range id, depending on the concrete
 * {@link ServerBatchRequest} subtype.
 *
 * @throws UnsupportedOperationException for unknown batch request types.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // NONE maps onto the collection's "none" partition key per its definition.
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey());
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String pkRangeId = ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(pkRangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    // Batch control headers: marks the payload as a batch and carries atomicity semantics.
    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers.
 * Sets the date, authorization, capability, content-type and accept headers, then — when the
 * request targets a feed range — also populates the feed-range filtering headers before
 * (re-)applying the authorization header.
 *
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // The x-date header participates in key-based signature computation below.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is sent URL-encoded; UTF-8 is always available so this should not throw.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }
    this.populateCapabilitiesHeader(request);
    // Default content-type/accept headers, only when the caller has not already set them.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range headers need the collection/pk-range metadata, hence the async branch;
        // the authorization header is populated afterwards so it covers the final header set.
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    // Advertise the SDK's supported capabilities unless a caller already set the header.
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    // Only document/conflict feed reads and queries that carry an explicit feed range
    // need feed-range filtering headers.
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD auth requires an async token fetch; other auth types have already set the
    // authorization header synchronously.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Non-AAD auth types need no work here; AAD requires an async token fetch.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Produces the authorization token for a request. Credential sources are consulted in a
 * fixed precedence order:
 *   1. a caller-supplied token resolver,
 *   2. an AzureKeyCredential (key-based HMAC signature),
 *   3. a single resource token configured as the master key,
 *   4. the per-resource token map (DatabaseAccount reads use the first token from the
 *      permission feed).
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Properties are handed to user code read-only.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        return masterKeyOrResourceToken;
    } else {
        // By construction one of the credential sources must exist; only the token map remains.
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    // Map the wire-level resource type onto the public enum; anything unmapped is SYSTEM.
    CosmosResourceType resolved =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return resolved != null ? resolved : CosmosResourceType.SYSTEM;
}
/** Records the session token from a response so later session-consistent reads observe this write. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    // Headers (auth, content-type, feed range) are populated first; the store proxy is then
    // resolved from the populated request and the message dispatched.
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            // On a retry, close out the retry-context timing window before dispatch.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    // An upsert is a POST carrying the IS_UPSERT marker header; the response's session token
    // is captured so follow-up session reads observe this write.
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            // On a retry, close out the retry-context timing window before dispatch.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Replace is a PUT; headers are populated before dispatch to the store proxy.
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            // On a retry, close out the retry-context timing window before dispatch.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Patch uses the PATCH verb (JSON-patch content type is defaulted during header population).
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            // On a retry, close out the retry-context timing window before dispatch.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Creates run through the availability-strategy wrapper so cross-region hedging can be
    // applied when configured; non-idempotent write retries are honored when opted in.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (effectiveOptions, e2eConfig, clientContextOverride) -> createDocumentCore(
            collectionLink,
            document,
            effectiveOptions,
            disableAutomaticIdGeneration,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core create path: sets up a scoped diagnostics factory, a session-token-resetting retry
 * policy (wrapped with partition-key-mismatch handling when no explicit partition key is
 * supplied), and applies the end-to-end operation timeout around the actual create.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Without an explicit partition key, a stale collection cache can route to the wrong
    // partition; the mismatch retry policy refreshes the cache and retries.
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and dispatches the create request; synchronous failures (e.g. serialization) are
 * surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride)
            .flatMap(serviceRequest ->
                create(serviceRequest, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a point-operation Mono with the end-to-end operation timeout when the E2E latency
 * policy is enabled. A negative configured timeout fails immediately with a cancellation
 * exception; otherwise a reactor timeout is applied and timeout errors are mapped to
 * {@link OperationCancelledException} with diagnostics attached.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {
    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Ensure at least one diagnostics instance exists so the error carries diagnostics.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }
        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    return rxDocumentServiceResponseMono;
}
/**
 * Maps a reactor {@link TimeoutException} (raised by the E2E timeout operator) to an
 * {@link OperationCancelledException} carrying the most recent diagnostics, and fires the
 * mark-E2E-timeout callback so the in-flight request is flagged as cancelled. Any other
 * throwable passes through unchanged.
 */
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
    // Reactor wraps checked exceptions; unwrap to see the real cause.
    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if (unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());
        // Notify the request context (if wired) that this cancellation was a client-side timeout.
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }
        // Ensure at least one diagnostics instance exists so the error carries diagnostics.
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }
        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }
    return throwable;
}
/**
 * Builds the cancellation exception reported when a caller configures a negative
 * end-to-end timeout, tagging it with the dedicated sub-status code and, when available,
 * the current diagnostics.
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    CosmosException exception =
        new OperationCancelledException(String.format("Negative timeout '%s' provided.", negativeTimeout), null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }
    return exception;
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    // Upserts run through the availability-strategy wrapper so cross-region hedging can be
    // applied when configured; non-idempotent write retries are honored when opted in.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (effectiveOptions, e2eConfig, clientContextOverride) -> upsertDocumentCore(
            collectionLink, document, effectiveOptions, disableAutomaticIdGeneration, e2eConfig, clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core upsert path: sets up a scoped diagnostics factory, a session-token-resetting retry
 * policy (wrapped with partition-key-mismatch handling when no explicit partition key is
 * supplied), and applies the end-to-end operation timeout around the actual upsert.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    // Without an explicit partition key, a stale collection cache can route to the wrong
    // partition; the mismatch retry policy refreshes the cache and retries.
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and dispatches the upsert request; synchronous failures (e.g. serialization) are
 * surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride)
            .flatMap(serviceRequest ->
                upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    // Replaces run through the availability-strategy wrapper so cross-region hedging can be
    // applied when configured; non-idempotent write retries are honored when opted in.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            documentLink,
            document,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Replace core: wires up retry policies and scoped diagnostics, then applies the
 * end-to-end timeout around the actual replace request.
 *
 * @param documentLink link of the document to replace.
 * @param document new payload.
 * @param options request options (may be null; normalized below).
 * @param endToEndPolicyConfig effective end-to-end latency policy (may be null).
 * @param clientContextOverride diagnostics context to attribute this request to.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory accumulates diagnostics across retries for this single logical operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without an explicit partition key, a stale collection cache can route to the wrong
        // partition (e.g. after a collection re-create) - layer a mismatch retry policy.
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            // Use the effectively-final reference consistently (same instance as
            // requestRetryPolicy); keeps this method aligned with upsertDocumentCore.
            finalRequestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Validates the inputs, converts the arbitrary payload into the internal {@code Document}
 * representation, and delegates to the typed replace path.
 *
 * @throws IllegalArgumentException (as an error Mono) if documentLink is empty or document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // Pass the throwable as the final argument so the stack trace is preserved at debug
        // level (consistent with upsertDocumentInternal).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its self-link, applying the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // Non-idempotent write retries are only honored when explicitly opted into on the options.
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) ->
            replaceDocumentCore(document, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled);
}
// Replace variant that takes an already-materialized Document and routes via its self-link.
// NOTE(review): unlike the String-link overload, this path does NOT wrap the call in
// getPointOperationResponseMonoWithE2ETimeout and does not normalize a null 'options' -
// confirm this asymmetry with the sibling *Core methods is intentional.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // No explicit partition key: layer a retry policy that refreshes cached collection
        // metadata when the service reports a partition-key mismatch.
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    // Effectively-final copy required for capture by the lambda below.
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the Document and delegates to the link-based replace path using the
 * document's self-link.
 *
 * @throws IllegalArgumentException (as an error Mono) if document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // Fixed copy/paste in the message ("database" -> "document") and attach the
        // throwable so the stack trace is available at debug level.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Lowest-level replace: builds the wire request for an already-typed Document, records
// serialization diagnostics, resolves the collection, attaches partition-key routing
// information, and issues the request.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);

    // Payload serialization is timed so it can be surfaced in the request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();
    if (options != null) {
        // Stamp the tracking id into the payload itself so the service echoes it back;
        // used to correlate (potentially retried) writes.
        String trackingId = options.getTrackingId();

        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook lets the end-to-end timeout machinery mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);

    return requestObs
        // 'req' is the same request instance enriched with partition-key information, so
        // issuing the captured 'request' here is equivalent.
        .flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Resolves the end-to-end latency policy for a request: the request-level config when one is
 * set on the options, otherwise the client-level default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Falls back to the client-level end-to-end latency policy when no request-level policy
 * is supplied.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies the given patch operations to a document, wrapping the call in the configured
 * availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    // Non-idempotent write retries are only honored when explicitly opted into on the options.
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) ->
            patchDocumentCore(documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled);
}
// Patch core: sets up the session-token-reset retry policy and scoped diagnostics, then
// applies the end-to-end timeout around the actual patch request.
// NOTE(review): unlike replace/upsert cores, no PartitionKeyMismatchRetryPolicy is layered
// here - confirm this is intentional for patch.
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Normalize options so downstream code never sees null.
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Lowest-level patch: serializes the patch operations, builds the wire request, records
// serialization diagnostics, attaches partition-key routing information, and issues the
// request.
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);

    // Serialization of the patch body is timed for the request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook lets the end-to-end timeout machinery mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);

    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // content/document are null here: the patch body is not used for partition-key extraction,
    // routing relies on the partition key from the options.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);

    return requestObs
        // 'req' is the same request instance enriched with routing info; using the captured
        // 'request' is equivalent.
        .flatMap(req -> patch(request, retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document addressed by its link, applying the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Non-idempotent write retries are only honored when explicitly opted into on the options.
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    // No item payload is supplied for this overload (null InternalObjectNode).
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) ->
            deleteDocumentCore(documentLink, null, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled);
}
/**
 * Deletes a document addressed by its link, using the supplied item payload for
 * partition-key extraction, applying the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Non-idempotent write retries are only honored when explicitly opted into on the options.
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) ->
            deleteDocumentCore(documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled);
}
// Delete core: sets up the session-token-reset retry policy and scoped diagnostics, then
// applies the end-to-end timeout around the actual delete request.
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Normalize options so downstream code never sees null.
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                nonNullRequestOptions,
                requestRetryPolicy,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Lowest-level delete: builds the wire request, attaches partition-key routing information
 * (optionally extracted from the supplied item payload), and issues the request.
 *
 * @param internalObjectNode optional item payload used for partition-key extraction; may be null.
 * @throws IllegalArgumentException (as an error Mono) if documentLink is empty.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Hook lets the end-to-end timeout machinery mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        // Attach the throwable so the stack trace is preserved at debug level
        // (consistent with upsertDocumentInternal).
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes all documents within a single logical partition of the given collection.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    // NOTE(review): 'partitionKey' is not forwarded here; routing appears to rely on the
    // partition key carried by 'options' (via addPartitionKeyInformation downstream) -
    // confirm this is intentional.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Issues a PartitionKey-scoped delete (server-side "delete all items by partition key").
// The partition key itself is expected to travel on 'options' and is attached by
// addPartitionKeyInformation below.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        // Note: ResourceType here is PartitionKey, not Document.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Synchronous failures are surfaced as an error Mono.
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a single document, using this client itself as the diagnostics factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}
/**
 * Reads a single document through the availability strategy, attributing diagnostics to the
 * supplied factory (used by readMany to merge point-read diagnostics into one context).
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    // Reads never opt into non-idempotent write retries, hence the hard-coded 'false'.
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) ->
            readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false,
        innerDiagnosticsFactory);
}
// Read core: sets up the session-token-reset retry policy and scoped diagnostics, then
// applies the end-to-end timeout around the actual read request.
private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Normalize options so readDocumentInternal never sees null.
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(
                documentLink,
                nonNullRequestOptions,
                retryPolicyInstance,
                scopedDiagnosticsFactory),
            retryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
// Lowest-level read: builds the wire request, attaches partition-key routing information,
// and issues the request.
// NOTE(review): unlike the sibling *Internal methods, 'options' is dereferenced here without
// a null guard - safe today because readDocumentCore always passes a non-null RequestOptions,
// but confirm before adding new callers.
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);

        // Hook lets the end-to-end timeout machinery mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req ->
            // 'req' is the same request instance enriched with routing info; issuing the
            // captured 'request' is equivalent.
            this.read(request, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));

    } catch (Exception e) {
        // Synchronous failures are surfaced as an error Mono.
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection by issuing a full-scan query ("SELECT * FROM r").
 *
 * @throws IllegalArgumentException if collectionLink is null or empty.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
/**
 * Reads many items by (id, partition key) in one logical operation. Items are grouped by
 * owning physical partition (PartitionKeyRange); partitions owning exactly one requested
 * item are served via cheap point reads, partitions owning several items via one query per
 * partition. Results, request charges, query metrics, and diagnostics from all
 * sub-operations are aggregated into a single FeedResponse.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {

    // One scoped factory collects diagnostics of every sub-operation (point reads + queries)
    // so the caller sees a single merged diagnostics context.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );

    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    // Placeholder request used only to resolve collection metadata for routing.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);

    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    // Bucket the requested items by the physical partition that owns them.
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // For hierarchical (MULTI_HASH) partition keys, a partial key cannot
                            // be routed to a single range - reject it up front.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                                   .getComponents().size() != pkDefinition.getPaths().size()) {

                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }

                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);

                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });

                    // Only ranges with >1 item get a query; single-item ranges become point reads.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);

                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));

                    // Run point reads and queries concurrently, then fold every page into a
                    // single synthetic FeedResponse.
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }

                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }

                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);

                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // Record the whole readMany as one 200 operation with the
                                // aggregated charge and item count.
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }

                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                .onErrorMap(throwable -> {
                    // On failure, still attach the merged diagnostics context to the
                    // CosmosException before propagating it.
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();

                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }

                        return cosmosException;
                    }

                    return throwable;
                });
            }
        );
}
/**
 * Builds one SQL query per partition range that owns more than one requested item.
 * Single-item ranges are intentionally excluded: those are served via point reads instead.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        List<CosmosItemIdentity> itemIdentities = entry.getValue();
        if (itemIdentities.size() <= 1) {
            continue; // served by a point read, no query needed
        }

        SqlQuerySpec sqlQuerySpec;
        if (partitionKeySelector.equals("[\"id\"]")) {
            // The partition key IS the id - a plain IN clause on id suffices.
            sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(itemIdentities, partitionKeySelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            sqlQuerySpec = createReadManyQuerySpecMultiHash(itemIdentities, partitionKeyDefinition);
        } else {
            sqlQuerySpec = createReadManyQuerySpec(itemIdentities, partitionKeySelector);
        }
        rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
    }

    return rangeQueryMap;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, @param1, ... )" for the case where the
 * partition key path is the id itself, so filtering on id alone is sufficient.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");

    int index = 0;
    for (CosmosItemIdentity itemIdentity : idPartitionKeyPairList) {
        String idParamName = "@param" + index;
        parameters.add(new SqlParameter(idParamName, itemIdentity.getId()));

        if (index > 0) {
            query.append(", ");
        }
        query.append(idParamName);
        index++;
    }
    query.append(" )");

    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds a parameterized OR-of-conjunctions query matching each (id, partition key) pair:
 * SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @p) OR ... ).
 * Parameter numbering: pk value gets @param(2i), id gets @param(2i+1).
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");

    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);

        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName,
            ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        if (i > 0) {
            query.append(" OR ");
        }
        query
            .append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }

    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
// Builds the readMany query for hierarchical (MULTI_HASH) partition keys: each item matches
// on id AND every sub-partition-key component.
// NOTE(review): the raw partition-key value is split on '=' to recover its components -
// this presumably relies on the internal serialized form and would break if any component
// value contains '='; confirm the invariant upstream.
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {

    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    // Single running counter so parameter names stay unique across items and components.
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkValueString = (String) pkValue;
        // Each element: [pk path, parameter name] for one sub-partition-key component.
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;

        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));

        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);

        // AND one equality clause per sub-partition-key component.
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // substring(1) drops the leading '/' of the partition key path.
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }

        queryStringBuilder.append(" )");
        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Builds the bracketed selector (e.g. ["a"]["b"]) used to address the partition key property
// in generated queries, from the PK definition's paths (each path starts with '/').
// NOTE(review): StringUtils.replace(pathPart, "\"", "\\") replaces an embedded double-quote
// with a single backslash rather than an escaped quote (\") - looks suspicious; confirm the
// intended escaping before changing, since downstream compares against the literal ["id"].
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // skip the leading '/'
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) // escape quote characters
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
// Executes the per-partition readMany queries (one query per PartitionKeyRange owning more
// than one requested item) and, when an end-to-end latency policy is enabled, wraps the
// resulting feed in the timeout machinery.
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    // Nothing to query - every requested item is handled by a point read.
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }

    UUID activityId = randomUuid();

    // Shared flag allowing the timeout wrapper to signal cancellation into the query pipeline.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

    RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .toRequestOptions(options);

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }

    return feedResponseFlux;
}
/**
 * Executes the point-read portion of a readMany operation: partition ranges that
 * target exactly one item are served via a document read instead of a query, and
 * each result (or tolerated 404) is re-shaped into a single-item feed response so
 * it can be merged with the query results.
 * <p>
 * NOTE(review): ranges with more than one identity yield {@code Mono.empty()} here —
 * presumably those are handled by the query path in the caller; confirm at call site.
 *
 * @param diagnosticsFactory            scoped diagnostics factory used for the reads.
 * @param singleItemPartitionRequestMap per-range item identities; only single-entry lists are read here.
 * @param resourceLink                  prefix the item id is appended to
 *                                      (assumes it ends with a path separator — TODO confirm).
 * @param queryRequestOptions           options converted into per-read RequestOptions.
 * @param klass                         item deserialization target type.
 * @return a Flux of one feed response per point read (empty feed + diagnostics for 404/0).
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {
    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            // A plain 404 (sub-status UNKNOWN) means "item not found" — readMany
                            // tolerates that and emits an empty page; anything else propagates.
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;
            if (cosmosException != null) {
                // Tolerated 404: empty page, but keep the exception's headers and diagnostics
                // so the readMany diagnostics still show the attempted read.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                // Successful read: wrap the single item as a one-element feed page.
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }
            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents using a raw query string; convenience overload that wraps the
 * string into a parameterless {@link SqlQuerySpec}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Creates an {@link IDocumentQueryClient} facade over this client for the query
 * pipeline, forwarding caches, consistency settings and execution to the enclosing
 * {@code RxDocumentClientImpl}. When an operation context/listener tuple is supplied,
 * query execution notifies the listener on request, response and error.
 * <p>
 * NOTE(review): the {@code rxDocumentClientImpl} parameter is unused — the anonymous
 * class captures {@code RxDocumentClientImpl.this} directly; consider removing it.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Client-configured (possibly weaker) consistency override.
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Instrumented path: stamp the correlation activity id header and notify the
                // listener around the request lifecycle.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);
                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): intentionally(?) unimplemented — returns null rather than an
            // empty/error Mono; verify no query pipeline path ever invokes this.
            return null;
        }
    };
}
/**
 * Queries documents with a {@link SqlQuerySpec}: logs the query, then delegates to
 * the generic document query path.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    final SqlQuerySpecLogger queryLogger = SqlQuerySpecLogger.getInstance();
    queryLogger.logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Queries the change feed of a collection by constructing and executing a
 * {@link ChangeFeedQueryImpl} over the resolved collection.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    // Build the change-feed query bound to this client and the collection identifiers.
    final ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Paged-flux entry point for change feed: unwraps the options held by the operation
 * state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    final CosmosChangeFeedRequestOptions changeFeedOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, changeFeedOptions, classOfT);
}
/**
 * Reads all documents of a single logical partition by issuing a partition-scoped
 * scan query against the partition key range that owns the partition key.
 * <p>
 * Flow: validate args → clone options and read the e2e latency policy → register the
 * scoped diagnostics factory with the operation state (reset only when multiple
 * regions may be tried speculatively) → resolve the collection → build the logical
 * partition scan query → look up the owning partition key range via the routing map →
 * run the query pinned to that range, wrapped in an invalid-partition retry policy.
 *
 * @param collectionLink link of the collection to read from (must be non-empty).
 * @param partitionKey   the logical partition to scan (must be non-null).
 * @param state          operation state supplying query options and receiving diagnostics.
 * @param classOfT       item deserialization target type.
 * @return a Flux of feed responses containing all documents of the partition.
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    // Clone so per-call mutations (e.g. pinning the PK range id below) don't leak
    // back into the caller's options.
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());
    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);
    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // Single-region execution: no reset needed between attempts.
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx));
    } else {
        // Speculative multi-region execution: reset the factory before each attempt.
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
    }
    // Throwaway request used only to resolve the collection through the cache.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        // SELECT-all query filtered to the logical partition via the PK selector.
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
        // Retries on stale-collection (invalid partition) errors by refreshing the cache.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    // Map the partition key to its owning physical partition (PK range).
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    return createQueryInternal(
                        diagnosticsFactory,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }
        // Speculative path: fold diagnostics back into the request options on every
        // terminal signal (next, error, cancel).
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}
/**
 * Returns the client-wide cache of partitioned query execution plans, keyed by
 * query text. Exposed so the query pipeline can reuse plans across requests.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition key ranges feed of a collection (state-based overload).
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Reads the partition key ranges feed of a collection (options-based overload).
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds a service request targeting the stored procedures feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, feedPath, storedProcedure, headers, options);
}
/**
 * Builds a service request targeting the user-defined functions feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or udf is null.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, feedPath, udf, headers, options);
}
/**
 * Creates a stored procedure; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core create implementation for stored procedures: builds the Create request,
 * notifies the retry policy, and maps the wire response to a typed resource response.
 * Request-construction failures surface as an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Replaces a stored procedure; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core replace implementation for stored procedures: validates the resource, builds
 * the Replace request at the procedure's self-link, and maps the wire response.
 *
 * @throws IllegalArgumentException (as an error Mono) when storedProcedure is null.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Deletes a stored procedure; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core delete implementation for stored procedures: builds the Delete request at the
 * given link and maps the wire response to a typed resource response.
 *
 * @throws IllegalArgumentException (as an error Mono) when storedProcedureLink is empty.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String resourcePath = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads a stored procedure; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core read implementation for stored procedures: builds the Read request at the
 * given link and maps the wire response to a typed resource response.
 *
 * @throws IllegalArgumentException (as an error Mono) when storedProcedureLink is empty.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String resourcePath = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception error) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads the stored procedures feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures with a raw query string; wraps the string into a
 * parameterless {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures with a {@link SqlQuerySpec} via the generic query path.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Core execute implementation for stored procedures (ExecuteJavaScript request).
 * Serializes {@code procedureParams} (when present) as the request body, stamps
 * partition-key information on the request, issues it, and captures the returned
 * session token before converting the wire response.
 *
 * @param storedProcedureLink link of the stored procedure to execute.
 * @param options             request options (may be null); exclude-regions and the
 *                            operation listener tuple are taken from here.
 * @param procedureParams     positional procedure arguments; null/empty sends an empty body.
 * @param retryPolicy         retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the stored-procedure response, or an error Mono on
 *         request-construction failure.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // BUG FIX: the continuation previously ignored its parameter and reused the outer
        // `request` for both create() and captureSessionToken(); use the instance actually
        // emitted by addPartitionKeyInformation so any substitution it performs is honored.
        return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core batch-execution implementation: builds the batch document request, sends it,
 * and parses the service response into a {@link CosmosBatchResponse}. Construction
 * failures surface as an error Mono.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        final Mono<RxDocumentServiceRequest> requestMono =
            getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(serviceRequest -> create(serviceRequest, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Core create implementation for triggers: builds the Create request, notifies the
 * retry policy, and maps the wire response to a typed resource response.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Builds a service request targeting the triggers feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or trigger is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, feedPath,
        trigger, headers, options);
}
/**
 * Replaces a trigger; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Core replace implementation for triggers: validates the resource, builds the
 * Replace request at the trigger's self-link, and maps the wire response.
 *
 * @throws IllegalArgumentException (as an error Mono) when trigger is null.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        final String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Deletes a trigger; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core delete implementation for triggers: builds the Delete request at the given
 * link and maps the wire response to a typed resource response.
 *
 * @throws IllegalArgumentException (as an error Mono) when triggerLink is empty.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in deleting a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads a trigger; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core read implementation for triggers: builds the Read request at the given link
 * and maps the wire response to a typed resource response.
 *
 * @throws IllegalArgumentException (as an error Mono) when triggerLink is empty.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Trigger.class));
    } catch (Exception error) {
        logger.debug("Failure in reading a Trigger due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Reads the triggers feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers with a raw query string; wraps the string into a parameterless
 * {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
/**
 * Queries triggers with a {@link SqlQuerySpec} via the generic query path.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Core create implementation for user-defined functions: builds the Create request,
 * notifies the retry policy, and maps the wire response to a typed resource response.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        final RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
/**
 * Replaces a user-defined function; runs the internal implementation under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Core replace implementation for user-defined functions: validates the resource,
 * builds the Replace request at the UDF's self-link, and maps the wire response.
 *
 * @throws IllegalArgumentException (as an error Mono) when udf is null.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        final String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, UserDefinedFunction.class));
    } catch (Exception error) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", error.getMessage(), error);
        return Mono.error(error);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core delete implementation for a user defined function.
     * Builds a Delete request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) are returned as an error Mono.
     *
     * @param udfLink link of the UDF to delete; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core read implementation for a user defined function.
     * Builds a Read request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) are returned as an error Mono.
     *
     * @param udfLink link of the UDF to read; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
String query,
QueryFeedOperationState state) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
QueryFeedOperationState state) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
    /**
     * Core create implementation for a user.
     * Builds a Create request via {@link #getUserRequest} (which also validates the
     * arguments), executes it and maps the transport response to a typed
     * {@link ResourceResponse}. Synchronous failures become an error Mono.
     *
     * @param databaseLink link of the owning database.
     * @param user the user to create; must not be null.
     * @param options request options; may be null.
     * @param documentClientRetryPolicy retry policy driving the create.
     */
    private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core upsert implementation for a user.
     * Builds an Upsert request via {@link #getUserRequest}, lets the retry policy
     * inspect it, executes it and maps the transport response to a typed
     * {@link ResourceResponse}. Synchronous failures become an error Mono.
     *
     * @param databaseLink link of the owning database.
     * @param user the user to upsert; must not be null.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core replace implementation for a user.
     * Validates the resource, builds a Replace request against the user's self link,
     * lets the retry policy inspect it, executes it and maps the transport response
     * to a typed {@link ResourceResponse}. Synchronous failures become an error Mono.
     *
     * @param user the replacement user; must not be null.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (user == null) {
                throw new IllegalArgumentException("user");
            }
            logger.debug("Replacing a User. user id [{}]", user.getId());
            RxDocumentClientImpl.validateResource(user);
            String path = Utils.joinPath(user.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core delete implementation for a user.
     * Builds a Delete request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) become an error Mono.
     *
     * @param userLink link of the user to delete; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Deleting a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core read implementation for a user.
     * Builds a Read request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) become an error Mono.
     *
     * @param userLink link of the user to read; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Reading a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core read implementation for a client encryption key.
     * Builds a Read request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) become an error Mono.
     *
     * @param clientEncryptionKeyLink link of the key to read; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
                throw new IllegalArgumentException("clientEncryptionKeyLink");
            }
            logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
            String path = Utils.joinPath(clientEncryptionKeyLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core create implementation for a client encryption key.
     * Builds a Create request via {@link #getClientEncryptionKeyRequest} (which also
     * validates the arguments), executes it and maps the transport response to a typed
     * {@link ResourceResponse}. Synchronous failures become an error Mono.
     *
     * @param databaseLink link of the owning database.
     * @param clientEncryptionKey the key to create; must not be null.
     * @param options request options; may be null.
     * @param documentClientRetryPolicy retry policy driving the create.
     */
    private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
            RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
String nameBasedLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core replace implementation for a client encryption key.
     * Validates the resource, builds a Replace request against the supplied name-based
     * link (not the resource's self link), lets the retry policy inspect it, executes
     * it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures become an error Mono.
     *
     * @param clientEncryptionKey the replacement key; must not be null.
     * @param nameBasedLink name-based link addressing the key to replace.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (clientEncryptionKey == null) {
                throw new IllegalArgumentException("clientEncryptionKey");
            }
            logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
            RxDocumentClientImpl.validateResource(clientEncryptionKey);
            String path = Utils.joinPath(nameBasedLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
                OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
                options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
String databaseLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
String databaseLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null));
}
    /**
     * Core create implementation for a permission.
     * Builds a Create request via {@link #getPermissionRequest} (which also validates
     * the arguments), executes it and maps the transport response to a typed
     * {@link ResourceResponse}. Synchronous failures become an error Mono.
     *
     * @param userLink link of the owning user.
     * @param permission the permission to create; must not be null.
     * @param options request options; may be null.
     * @param documentClientRetryPolicy retry policy driving the create.
     */
    private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
        RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
            RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
                OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core upsert implementation for a permission.
     * Builds an Upsert request via {@link #getPermissionRequest}, lets the retry
     * policy inspect it, executes it and maps the transport response to a typed
     * {@link ResourceResponse}. Synchronous failures become an error Mono.
     *
     * @param userLink link of the owning user.
     * @param permission the permission to upsert; must not be null.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
        RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
            RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
                OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core replace implementation for a permission.
     * Validates the resource, builds a Replace request against the permission's self
     * link, lets the retry policy inspect it, executes it and maps the transport
     * response to a typed {@link ResourceResponse}. Synchronous failures become an
     * error Mono.
     *
     * @param permission the replacement permission; must not be null.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (permission == null) {
                throw new IllegalArgumentException("permission");
            }
            logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
            RxDocumentClientImpl.validateResource(permission);
            String path = Utils.joinPath(permission.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core delete implementation for a permission.
     * Builds a Delete request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) become an error Mono.
     *
     * @param permissionLink link of the permission to delete; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(permissionLink)) {
                throw new IllegalArgumentException("permissionLink");
            }
            logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
            String path = Utils.joinPath(permissionLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core read implementation for a permission.
     * Builds a Read request for the given link, lets the retry policy inspect it,
     * executes it and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) become an error Mono.
     *
     * @param permissionLink link of the permission to read; must be non-empty.
     * @param options request options; may be null.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
        try {
            if (StringUtils.isEmpty(permissionLink)) {
                throw new IllegalArgumentException("permissionLink");
            }
            logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
            String path = Utils.joinPath(permissionLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
QueryFeedOperationState state) {
return queryPermissions(userLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
    /**
     * Core replace implementation for an offer.
     * Validates the resource and builds a Replace request against the offer's self
     * link; note that offers carry no request headers or options here (both null).
     * Synchronous failures become an error Mono.
     *
     * @param offer the replacement offer; must not be null.
     * @param documentClientRetryPolicy retry policy driving the replace.
     */
    private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            if (offer == null) {
                throw new IllegalArgumentException("offer");
            }
            logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
            RxDocumentClientImpl.validateResource(offer);
            String path = Utils.joinPath(offer.getSelfLink(), null);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
                ResourceType.Offer, path, offer, null, null);
            return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Core read implementation for an offer.
     * Builds a Read request for the given link (no headers/options — the HashMap cast
     * only disambiguates the overload), lets the retry policy inspect it, executes it
     * and maps the transport response to a typed {@link ResourceResponse}.
     * Synchronous failures (e.g. a blank link) become an error Mono.
     *
     * @param offerLink link of the offer to read; must be non-empty.
     * @param retryPolicyInstance retry policy notified via onBeforeSendRequest; may be null.
     */
    private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(offerLink)) {
                throw new IllegalArgumentException("offerLink");
            }
            logger.debug("Reading an Offer. offerLink [{}]", offerLink);
            String path = Utils.joinPath(offerLink, null);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
        } catch (Exception e) {
            logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
QueryFeedOperationState state,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
retryPolicy);
}
    /**
     * Paginated read-feed implementation for non-document resources.
     * Builds one ReadFeed request per page (carrying the continuation token and page
     * size headers), executes each through {@code readFeed}, and converts every raw
     * response into a typed {@link FeedResponse} page via the options' item factory.
     *
     * @param options query options; a default instance is substituted when null.
     * @param resourceType resource type of the feed; must not be Document (asserted).
     * @param klass item type of the resulting pages.
     * @param resourceLink link of the feed to read.
     * @param retryPolicy policy notified before each per-page request is sent.
     */
    private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
        CosmosQueryRequestOptions options,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink,
        DocumentClientRetryPolicy retryPolicy) {
        // Normalize null options so the factory lookup below always has an instance.
        final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
        // -1 signals the paginator to use the service-side default page size.
        int maxPageSize = maxItemCount != null ? maxItemCount : -1;
        assert(resourceType != ResourceType.Document);
        // Per-page request factory: continuation token is only attached after page one.
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };
        // Per-request executor: run the read-feed and map the raw response to a typed page.
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
            request -> readFeed(request)
                .map(response -> toFeedResponsePage(
                    response,
                    ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .getItemFactoryMethod(nonNullOptions, klass),
                    klass));
        return Paginator
            .getPaginatedQueryResultAsObservable(
                nonNullOptions,
                createRequestFunc,
                executeFunc,
                maxPageSize);
    }
// Queries Offer resources using a raw query string (wrapped as a SqlQuerySpec).
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    return queryOffers(new SqlQuerySpec(query), state);
}
// Queries Offer resources using a parameterized query spec.
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
// Reads the account metadata, wrapped in the session-token-reset retry policy.
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
// Issues the DatabaseAccount read (empty link targets the service root);
// synchronous failures become an error Mono.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest accountRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(accountRequest, documentClientRetryPolicy)
            .map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the session container used for session-consistency bookkeeping.
public Object getSession() {
    return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer
// (the unconditional cast fails with ClassCastException otherwise).
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
// Collection metadata cache (link/name -> DocumentCollection resolution).
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
// Partition key range cache used for routing and feed-range resolution.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
// Manager tracking regional read/write endpoints for this account.
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}
// Builds a fresh AddressSelector over the client's address resolver and
// the configured transport protocol (a new instance per call).
@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the DatabaseAccount metadata from one specific endpoint (endpoint
// override bypasses the endpoint manager's selection). Deferred so a new
// request is built per subscription.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Route to the explicitly requested endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        // Multi-write usable only if both client policy and account allow it.
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request about to be dispatched
 * @return the store model (gateway or direct store) that must handle this request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // These resources (and scripts, except ExecuteJavaScript) are gateway-only
    // regardless of the operation.
    boolean alwaysGateway = resourceType == ResourceType.Offer
        || resourceType == ResourceType.ClientEncryptionKey
        || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
        || resourceType == ResourceType.PartitionKeyRange
        || (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete);
    if (alwaysGateway) {
        return this.gatewayProxy;
    }
    switch (operationType) {
        case Create:
        case Upsert:
            if (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Delete:
            if (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Replace:
        case Read:
            return resourceType == ResourceType.DocumentCollection
                ? this.gatewayProxy
                : this.storeModel;
        default:
            // Queries/feeds over collection children with no resolved partition
            // target (no PK range identity and no partition-key header) go
            // through gateway.
            if ((operationType == OperationType.Query
                    || operationType == OperationType.SqlQuery
                    || operationType == OperationType.ReadFeed)
                && Utils.isCollectionChild(request.getResourceType())
                && request.getPartitionKeyRangeIdentity() == null
                && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
// Idempotent shutdown: the 'closed' flag guarantees the teardown sequence runs
// at most once; a second call only logs a warning. Teardown order follows the
// original sequence: endpoint manager, store client factory, HTTP client,
// CPU monitor, then (if ever enabled) the throughput control store.
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // The throughput control store is created lazily; only close it if enabled.
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
// Deserializer used to materialize items from service responses.
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
// Lazily creates the ThroughputControlStore on first use (compareAndSet makes
// the initialization happen exactly once per client) and wires it into the
// direct or gateway store model; then registers the group with the store.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up and cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Account-level default consistency as reported by the gateway configuration reader.
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * <p>
 * In DIRECT mode the injector is registered with both the store model and the
 * address resolver; the gateway proxy is configured in every mode, since some
 * requests route through gateway regardless of connection mode.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Forwards the "warm-up completed" notification to the store model.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
// Forwards the "warm-up started" notification to the store model.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Raw credential (master key or resource token) this client was built with.
@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}
// Builds "SELECT * FROM c WHERE c<selector> = @pkValue" with the partition key
// value bound as a query parameter (never inlined into the query text).
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final String queryText =
        "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the physical partition key ranges of a collection and maps each to
// an EPK-based FeedRange. Wrapped in an InvalidPartitionException retry policy
// so a stale name-cache entry triggers a refresh and a retry.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink, forceRefresh),
        invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection first, then fetches all partition key ranges
// overlapping the full EPK range and converts them to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(
    RxDocumentServiceRequest request,
    String collectionLink,
    boolean forceRefresh) {
    logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        // forceRefresh bypasses the cached ranges when the caller suspects staleness.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(),
                RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
                forceRefresh,
                null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
// Maps each partition key range to a FeedRange. A null range list means the
// routing information was stale: force a name-cache refresh and throw
// InvalidPartitionException so the enclosing retry policy retries.
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range as an epk-based FeedRange.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    // ThreadLocalRandom avoids the SecureRandom contention of UUID.randomUUID().
    return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
}
// Forces the RFC 4122 version (4) and IETF variant bits onto the raw input
// words so the result is a well-formed random-based UUID.
static UUID randomUuid(long msb, long lsb) {
    final long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    final long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload that uses this client itself as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
// Availability-strategy (hedging) wrapper for document point operations: when
// an enabled end-to-end policy with a ThresholdBasedAvailabilityStrategy makes
// two or more regions eligible, the same operation is raced across those
// regions (later regions delayed by threshold + n * thresholdStep) and the
// first non-transient result - success or non-transient error - wins.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");
    final RequestOptions nonNullRequestOptions =
        initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
    checkArgument(
        resourceType == ResourceType.Document,
        "This method can only be used for document point operations.");
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        idempotentWriteRetriesEnabled,
        nonNullRequestOptions);
    // Hedging disabled or fewer than two eligible regions: execute directly.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
    // The scoped factory records diagnostics from every hedged attempt so they
    // can be merged into one diagnostics context at the end.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
            if (monoList.isEmpty()) {
                // First mono keeps normal cross-region behavior; every
                // CosmosException (transient or not) is materialized as a value
                // so firstWithValue can select it.
                Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged mono is pinned to its region by excluding all other
                // applicable regions; only non-transient errors become values
                // (transient errors leave the race open for other regions).
                clonedOptions.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        nonNullRequestOptions.getExcludeRegions(),
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger: threshold for the first hedge, + one thresholdStep
                // per additional region.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            // Merge diagnostics of all attempts before surfacing the outcome.
            diagnosticsFactory.merge(nonNullRequestOptions);
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            // firstWithValue signals NoSuchElementException when all sources
            // errored; unwrap the composite and surface the first
            // CosmosException found among the inner causes.
            Throwable exception = Exceptions.unwrap(throwable);
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                        diagnosticsFactory.merge(nonNullRequestOptions);
                        return cosmosException;
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            diagnosticsFactory.merge(nonNullRequestOptions);
            return exception;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the unwrapped (reactor-composite-free) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    final Throwable unwrappedException = Exceptions.unwrap(t);
    return unwrappedException instanceof CosmosException;
}
// True when the unwrapped throwable is a CosmosException whose status/substatus
// is considered final for hedging; non-CosmosExceptions count as transient.
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
// For a hedged attempt targeting currentRegion, exclude every other applicable
// region (on top of whatever the caller already excluded) so the attempt
// cannot fail over away from its intended region.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    final List<String> effectiveExcludedRegions = new ArrayList<>(
        initialExcludedRegions != null ? initialExcludedRegions : Collections.<String>emptyList());
    for (String candidateRegion : applicableRegions) {
        if (!candidateRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(candidateRegion);
        }
    }
    return effectiveExcludedRegions;
}
// Classifies an outcome as final (non-transient) for hedging purposes:
// any success (< 400), client-operation timeouts, deterministic client errors
// (400/401/405/409/412/413), and a plain 404 (substatus UNKNOWN) are final;
// everything else is transient and keeps the hedging race open.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    switch (statusCode) {
        case HttpConstants.StatusCodes.REQUEST_TIMEOUT:
            return subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
        case HttpConstants.StatusCodes.BADREQUEST:
        case HttpConstants.StatusCodes.CONFLICT:
        case HttpConstants.StatusCodes.METHOD_NOT_ALLOWED:
        case HttpConstants.StatusCodes.PRECONDITION_FAILED:
        case HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE:
        case HttpConstants.StatusCodes.UNAUTHORIZED:
            return true;
        case HttpConstants.StatusCodes.NOTFOUND:
            return subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
        default:
            return false;
    }
}
// Returns the override context when provided, otherwise this client itself.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    if (clientContextOverride != null) {
        return clientContextOverride;
    }
    return this;
}
/**
 * Returns the applicable endpoints ordered by the preference list, if any.
 *
 * @param operationType the operation type; read-only vs. write determines the endpoint set
 * @param excludedRegions regions to exclude from the candidate set (may be null)
 * @return the applicable endpoints ordered by preference list, or an empty list
 *         for operation types that are neither read nor write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    } else if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }
    return EMPTY_ENDPOINT_LIST;
}
// Removes null entries from the endpoint list in place and returns the same
// list. Returns the shared EMPTY_ENDPOINT_LIST when the input itself is null.
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // Single-pass compaction; the previous index-based remove() loop was
    // O(n^2) on ArrayList and harder to read.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Overload extracting the excluded-regions list from the request options.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {
    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
// Computes the ordered region list eligible for hedging. Hedging requires an
// enabled end-to-end policy with a ThresholdBasedAvailabilityStrategy and a
// document operation; writes additionally require idempotent retries and a
// multi-write-capable account. Returns an empty list when hedging is off.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {
    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return EMPTY_REGION_LIST;
    }
    if (resourceType != ResourceType.Document) {
        return EMPTY_REGION_LIST;
    }
    if (operationType.isWriteOperation()
        && (!isIdempotentWriteRetriesEnabled
            || !this.globalEndpointManager.canUseMultipleWriteLocations())) {
        return EMPTY_REGION_LIST;
    }
    if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
        return EMPTY_REGION_LIST;
    }
    // Normalize exclusions to lower case for a case-insensitive comparison.
    final HashSet<String> normalizedExcludedRegions = new HashSet<>();
    if (excludedRegions != null) {
        for (String excluded : excludedRegions) {
            normalizedExcludedRegions.add(excluded.toLowerCase(Locale.ROOT));
        }
    }
    final List<String> orderedRegionsForSpeculation = new ArrayList<>();
    for (URI endpoint : getApplicableEndPoints(operationType, excludedRegions)) {
        String regionName = this.globalEndpointManager.getRegionName(endpoint, operationType);
        if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
            orderedRegionsForSpeculation.add(regionName);
        }
    }
    return orderedRegionsForSpeculation;
}
// Availability-strategy (hedging) wrapper for document feed/query operations.
// Mirrors wrapPointOperationWithAvailabilityStrategy, but clones the service
// request per region and performs no scoped diagnostics merging.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
    final ResourceType resourceType,
    final OperationType operationType,
    final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
    final RxDocumentServiceRequest req,
    final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
    checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
    checkNotNull(req, "Argument 'req' must not be null.");
    assert(resourceType == ResourceType.Document);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        this.getEffectiveEndToEndOperationLatencyPolicyConfig(
            req.requestContext.getEndToEndOperationLatencyPolicyConfig());
    List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
    // Feed operations are read-like: idempotent-write-retries flag is passed as false.
    List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        false,
        initialExcludedRegions
    );
    // Hedging disabled or fewer than two eligible regions: execute directly.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return feedOperation.apply(retryPolicyFactory, req);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RxDocumentServiceRequest clonedRequest = req.clone();
            if (monoList.isEmpty()) {
                // First mono keeps normal cross-region behavior; every
                // CosmosException is materialized so firstWithValue can pick it.
                Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged mono pinned to its region by excluding all others;
                // only non-transient errors become values.
                clonedRequest.requestContext.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        initialExcludedRegions,
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger subscription: threshold + one thresholdStep per extra region.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            // firstWithValue signals NoSuchElementException when all sources
            // errored; surface the first CosmosException among the causes.
            Throwable exception = Exceptions.unwrap(throwable);
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        return Utils.as(innerException, CosmosException.class);
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            return exception;
        });
}
// Callback abstraction for a single document point operation; invoked once per
// hedged region with region-scoped request options and a diagnostics factory.
@FunctionalInterface
private interface DocumentPointOperation {
    Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Either-or holder for a point operation outcome: exactly one of
// {response, exception} is non-null. Used so hedged monos can materialize
// non-transient errors as values for Mono.firstWithValue.
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    // True when this result wraps a (non-transient) error.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
// Generic either-or holder for a feed operation outcome: exactly one of
// {response, exception} is non-null. Counterpart of
// NonTransientPointOperationResult for feed/query hedging.
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    // True when this result wraps a (non-transient) error.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
// DiagnosticsClientContext decorator that records every CosmosDiagnostics it
// creates so they can later be merged (at most once) into a single
// CosmosDiagnosticsContext - used by hedged operations that fan one logical
// operation out across multiple regions.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Guards merge(): the first successful compareAndSet wins, later calls no-op.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    // When true, merged feed diagnostics get their "captured in paged flux" flag set.
    private final boolean shouldCaptureAllFeedDiagnostics;
    private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }
    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }
    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Track every diagnostics instance handed out under this scope.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        mostRecentlyCreatedDiagnostics.set(diagnostics);
        return diagnostics;
    }
    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }
    @Override
    public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
        return this.mostRecentlyCreatedDiagnostics.get();
    }
    // Merges into the diagnostics-context snapshot from the request options
    // when present; otherwise merge(null) falls back to a context found among
    // the recorded diagnostics.
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                knownCtx = requestOptions.getDiagnosticsContextSnapshot();
            }
        }
        merge(knownCtx);
    }
    public void merge(CosmosDiagnosticsContext knownCtx) {
        // First merge wins; subsequent calls are no-ops.
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // No caller-provided context: use the first recorded diagnostics
            // that already carries one.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        // Attach every non-empty, context-less diagnostics to the chosen context.
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }
    // Clears recorded diagnostics and re-arms merge() for reuse.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// ---------------------------------------------------------------------------
// Process-wide static state shared by every client instance.
// ---------------------------------------------------------------------------
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
// Bridge accessors: reach into public-surface types without widening their public API.
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
// Per-process pseudo machine id; used for diagnostics correlation.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
// Counters tracking how many clients are alive and how many target each endpoint.
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Effective-partition-key range covering the whole partition key space.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
    PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
    PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
    "ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---------------------------------------------------------------------------
// Per-instance configuration and mutable runtime state.
// ---------------------------------------------------------------------------
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// Auth-related state; exactly one auth mechanism is active per instance.
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// Caches and transport plumbing, initialized lazily after construction.
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Maps resourceIdOrFullName -> (partition key, resource token) pairs from the permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor variant without an explicit {@link TokenCredential}.
 * Delegates all work to the private constructor (passing {@code null} for the
 * token credential) and then installs the optional authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            CosmosClientTelemetryConfig clientTelemetryConfig,
                            String clientCorrelationId,
                            CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                            SessionRetryOptions sessionRetryOptions,
                            CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        null, // no TokenCredential in this variant
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    // The resolver is installed after delegation; the private ctor never reads it.
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor variant that also accepts a {@link TokenCredential} (AAD auth).
 * Otherwise identical to the variant above: delegates to the private constructor,
 * then installs the optional authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            CosmosClientTelemetryConfig clientTelemetryConfig,
                            String clientCorrelationId,
                            CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                            SessionRetryOptions sessionRetryOptions,
                            CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor that additionally processes a permission feed:
 * after delegating base initialization, it builds {@code resourceTokensMap}
 * (resourceIdOrFullName -> list of (partition key, resource token) pairs)
 * and remembers the first resource token for fallback authorization.
 *
 * @throws IllegalArgumentException when a permission has an unparsable or empty
 *         resource link, or when the feed yields no usable tokens
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    if (permissionFeed != null && permissionFeed.size() > 0) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            // Validate the resource link before using it as a map key.
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // Group tokens per resource; a resource may have one token per partition key.
            partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
            if (partitionKeyAndResourceTokenPairs == null) {
                partitionKeyAndResourceTokenPairs = new ArrayList<>();
                this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
            }
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            // NOTE(review): this debug log emits the raw resource token (a credential);
            // confirm whether logging it — even at debug level — is acceptable.
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if(this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Keep the first resource token as a fallback for requests that cannot
        // be matched to a specific resource entry.
        String firstToken = permissionFeed.get(0).getToken();
        if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: registers the client in process-wide bookkeeping, selects the
 * authorization mechanism (key credential, resource token, master key, or AAD token),
 * and wires up connection policy, session container, user agent, HTTP client,
 * global endpoint manager and retry policy. On any runtime failure the partially
 * constructed client is closed before the exception is rethrown.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     CosmosClientTelemetryConfig clientTelemetryConfig,
                     String clientCorrelationId,
                     CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                     SessionRetryOptions sessionRetryOptions,
                     CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    assert(clientTelemetryConfig != null);
    // Only validated via assert; the flag itself is consumed elsewhere.
    Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
    assert(clientTelemetryEnabled != null);
    // Process-wide bookkeeping: active-client counter, per-client id, per-endpoint count.
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
        String.format("%05d",this.clientId): clientCorrelationId;
    clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withClientMap(clientMap);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
    this.sessionRetryOptions = sessionRetryOptions;
    // NOTE(review): 5 arguments are supplied for 4 '{}' placeholders — the trailing
    // configs.getProtocol() is silently dropped by SLF4J; the format string likely
    // needs an extra ", protocol [{}]" placeholder.
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        // Auth selection, in priority order:
        // explicit key credential > resource token > master key string > AAD token credential.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            // Plain master key string: wrap it in an AzureKeyCredential.
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // AAD path: scope derived from the service endpoint; tokens are cached.
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + ":
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            // Fall back to default direct-mode connection configuration.
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
        this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        // Session capturing is only active for SESSION consistency unless explicitly overridden.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        // Do not leak half-initialized clients; release resources before rethrowing.
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
@Override
public DiagnosticsClientConfig getConfig() {
    // Diagnostics configuration assembled during construction.
    return diagnosticsClientConfig;
}
@Override
public CosmosDiagnostics createDiagnostics() {
    // Creates a diagnostics instance honoring the configured sampling rate and
    // remembers it as the most recently created one (read by error paths).
    CosmosDiagnostics diagnostics =
        diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    this.mostRecentlyCreatedDiagnostics.set(diagnostics);
    return diagnostics;
}
/**
 * Creates the gateway configuration reader and validates that the database account
 * was successfully fetched; fails fast with a descriptive error otherwise.
 * Also derives whether multi-write locations can be used.
 *
 * @throws RuntimeException when the account could not be retrieved (endpoint
 *         unreachable or invalid auth token)
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    if (databaseAccount == null) {
        // Prefer surfacing the original refresh error as the cause when one was recorded.
        Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
        if (databaseRefreshErrorSnapshot != null) {
            logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
                + "is valid. More info: https:
                databaseRefreshErrorSnapshot
            );
            throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
                + "is valid. More info: https:
                databaseRefreshErrorSnapshot);
        } else {
            logger.error("Client initialization failed."
                + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
            throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
                + "is valid. More info: https:
        }
    }
    // Multi-write requires both the policy opt-in and account-level support.
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Pushes freshly initialized caches and configuration into the gateway store model.
private void updateGatewayProxy() {
    (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    (this.gatewayProxy).setCollectionCache(this.collectionCache);
    (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes the collection cache into the given metadata snapshot (for warm starts).
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up direct-mode transport: the global address resolver, the store client
 * factory, and the server store model.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy,
        this.apiType);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager);
    this.createStoreModel(true);
}
/**
 * Exposes this client as a {@link DatabaseAccountManagerInternal} so the
 * global endpoint manager can fetch account metadata through it.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
// Factory for the gateway store model; overridable seam for tests.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection
 * sharing across clients is enabled, a process-shared instance is reused;
 * otherwise a dedicated client is created and its config recorded in diagnostics.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
    } else {
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }
}
// Creates the direct-mode store model from the store client factory.
// NOTE(review): the subscribeRntbdStatus parameter is currently unused in this body.
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);
    this.storeModel = new ServerStoreModel(storeClient);
}
// ---------------------------------------------------------------------------
// Simple accessors over construction-time state.
// ---------------------------------------------------------------------------
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
@Override
public String getClientCorrelationId() {
    return this.clientCorrelationId;
}
@Override
public String getMachineId() {
    // Diagnostics config may be absent if accessed before/around construction failure.
    if (this.diagnosticsClientConfig == null) {
        return null;
    }
    return ClientTelemetry.getMachineId(diagnosticsClientConfig);
}
@Override
public String getUserAgent() {
    return this.userAgentContainer.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
    // Last diagnostics instance produced by createDiagnostics(); may be null.
    return mostRecentlyCreatedDiagnostics.get();
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    // Wrap the create in a fresh retry policy; inline execution when possible.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Serializes the database, records serialization diagnostics, builds the service
 * request, and issues the create. Validation failures surface as error Monos.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the JSON serialization so it shows up in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Convert synchronous validation/serialization failures into the reactive error channel.
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // Same retry/inline pattern as the other database operations.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for the given database link.
 * Synchronous failures (e.g. empty link) surface as error Monos.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for the given database link.
 * Synchronous failures (e.g. empty link) surface as error Monos.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads all databases as a feed from the databases root.
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link and target resource type to the feed/query link.
 * Database and Offer feeds are root-scoped and ignore the parent link; every
 * other supported type is addressed relative to its parent.
 *
 * @throws IllegalArgumentException for resource types with no query feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    // Root-scoped feeds: the parent link plays no role.
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        default:
            break;
    }
    // Parent-relative feeds: pick the child path segment, then join.
    final String childSegment;
    switch (resourceTypeEnum) {
        case DocumentCollection:
            childSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childSegment);
}
/**
 * Extracts the operation context / listener tuple attached to query options, if any.
 *
 * @param options the query request options; may be {@code null}
 * @return the attached tuple, or {@code null} when options are absent
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    if (options == null) {
        return null;
    }
    // Use the statically cached accessor (qryOptAccessor) instead of re-resolving it
    // through ImplementationBridgeHelpers on every call — consistent with the rest
    // of this class, which declares exactly this accessor for that purpose.
    return qryOptAccessor.getOperationContext(options);
}
/**
 * Extracts the operation context / listener tuple from request options;
 * {@code null} options simply mean no context was attached.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: uses this client itself as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Sets up a cross-partition query: resolves the feed link, picks a correlation
 * activity id, wraps execution in an invalid-partition retry policy, and scopes
 * all diagnostics through a {@link ScopedDiagnosticsFactory} that is merged into
 * the operation state's context on every emission, on error, and on cancellation.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Honor a caller-provided correlation id; otherwise mint a fresh one.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    // Merge captured diagnostics into the state's context on every terminal-ish path.
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Builds the document query execution context and turns it into a response flux.
 * Attaches query info (select-value projection) to every page, attaches query-plan
 * diagnostics to the first page only, and — when an end-to-end latency policy is
 * enabled — wraps the flux with a timeout.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
    // Tracks whether the current page is the first one (query-plan diagnostics
    // are only attached once).
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsClientContext);
        }
        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Attaches diagnostics to a query exception (typically a cancellation on timeout).
 * Prefers the most recently created diagnostics instance; otherwise merges the
 * client-side request statistics of all requests that were cancelled on timeout
 * into a single aggregated diagnostics object.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        // Fall back to the diagnostics tracked for requests cancelled on timeout.
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);
        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregatedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);

                        // BUGFIX: this previously read the statistics of 'first' a second
                        // time, so each reduce step merged 'first' with itself and the
                        // statistics of 'toBeMerged' were silently dropped.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);

                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get();
            BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
        }
    }
}
/**
 * Wraps the feed response flux with the configured end-to-end operation timeout.
 * When the timeout fires, the flux fails with a CosmosException that carries the
 * merged diagnostics of the cancelled request(s) and the cancellation flag is set.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout,
    DiagnosticsClientContext diagnosticsClientContext) {

    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

    // A negative timeout is reported with a dedicated exception type; a regular
    // timeout surfaces as OperationCancelledException. All other handling is shared.
    final boolean hasNegativeTimeout = endToEndTimeout.isNegative();

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (!(throwable instanceof TimeoutException)) {
                return throwable;
            }
            CosmosException cancellationException = hasNegativeTimeout
                ? getNegativeTimeoutException(null, endToEndTimeout)
                : new OperationCancelledException();
            cancellationException.setStackTrace(throwable.getStackTrace());
            isQueryCancelledOnTimeout.set(true);
            applyExceptionToMergedDiagnosticsForQuery(
                requestOptions, cancellationException, diagnosticsClientContext);
            return cancellationException;
        });
}
// Queries databases using a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return queryDatabases(new SqlQuerySpec(query), state);
}
// Queries databases at the databases root using a parameterized query spec.
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
// Creates a collection under the given database, wrapping the internal implementation
// with a session-token-reset retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of collection creation: validates arguments, serializes the
// collection body (recording serialization timings for diagnostics), issues the
// Create request and captures the session token from the response.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the payload serialization so it can be reported in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // On success, remember the session token for the new collection so subsequent
        // session-consistent reads can use it.
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Replaces an existing collection, wrapping the internal implementation with a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of collection replacement: validates the resource, serializes
// it (recording timings for diagnostics), issues the Replace request and, when the
// response carries a resource, records its session token.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the payload serialization so it can be reported in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // Unlike create, the replace response's resource may be absent (e.g. minimal
        // content responses), so guard before touching the session container.
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Deletes a collection, wrapping the internal implementation with a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of collection deletion: validates the link, builds the Delete
// request and maps the raw service response to a typed resource response.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Sends a DELETE request through the store proxy after populating headers.
 * Updates the retry context end time when this send is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // Mark the end of the back-off window if we are in a retry cycle.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends the "delete all items by partition key" POST request through the store proxy.
 * Updates the retry context end time when this send is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            // Mark the end of the back-off window if we are in a retry cycle.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends a GET (point read) request through the store proxy after populating headers.
 * Updates the retry context end time when this send is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // Mark the end of the back-off window if we are in a retry cycle.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Sends a read-feed GET request through the store proxy after populating headers.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
/**
 * Sends a query POST request through the store proxy and records the session token
 * from the response before emitting it.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest ->
            this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest)
                // Side effect only: capture the session token; the response passes through.
                .doOnNext(response -> this.captureSessionToken(populatedRequest, response)));
}
// Reads a collection, wrapping the internal implementation with a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of collection read: validates the link, builds the Read
// request and maps the raw service response to a typed resource response.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads the feed of collections under the given database link.
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
// Queries collections by raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Queries collections under the given database using a parameterized query spec.
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string, e.g. "[1,"a"]".
 * JsonSerializable values use the model bridge; everything else goes through the
 * shared object mapper. Serialization failures surface as IllegalArgumentException.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder joined = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            joined.append(',');
        }
        Object param = objectArray.get(i);
        if (param instanceof JsonSerializable) {
            joined.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param));
        } else {
            try {
                joined.append(mapper.writeValueAsString(param));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return joined.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the HTTP request headers for an operation, combining client-level defaults
// (multi-write, consistency) with the per-request options: etags, triggers, session
// token, throughput/offer settings, quota/script-logging flags and dedicated-gateway
// cache directives. Option-level values take precedence over client-level defaults.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    // Without per-request options, only the client-level content-response preference applies.
    if (options == null) {
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Custom headers are applied first so the well-known options below can override them.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response setting overrides the client-level default.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit offer throughput wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties are only consulted when no explicit offer throughput was set.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
// Exposes the factory used to create session-token-reset retry policies.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the collection for the request and attaches partition-key information
 * derived from the document/options, emitting the same request instance.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(resolvedCollection -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
            return request;
        });
}
/**
 * Attaches partition-key information to the request once the supplied collection
 * resolution completes, emitting the same request instance.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(resolvedCollection -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
        return request;
    });
}
// Determines the effective partition key for the request and stamps it both on the
// request object and as the partition-key header. Resolution order:
//   1) PartitionKey.NONE in options  -> the collection's "none" partition key
//   2) explicit partition key in options
//   3) collection has no PK definition -> empty partition key
//   4) extract from the document body (typed node, ObjectNode or raw bytes)
// Otherwise the caller failed to supply enough information and we throw.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object objectDoc, RequestOptions options,
    DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // Rewind before deserializing: the buffer may already have been read.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the partition-key extraction so it can be reported in diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the document write request: serializes the document (with timing recorded
// for diagnostics), creates the service request with headers, wires up the
// end-to-end timeout cancellation hook and region exclusions from options, then
// resolves the collection to attach partition-key information.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    OperationType operationType,
    DiagnosticsClientContext clientContextOverride) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the payload serialization so it can be reported in request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        // Hook lets the end-to-end timeout policy flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the batch request: wraps the pre-serialized batch body, creates the service
 * request with batch headers, wires up the end-to-end timeout cancellation hook and
 * region exclusions, then resolves the collection to attach batch routing headers.
 *
 * Note: removed a redundant duplicate of the excludeRegions assignment that set the
 * same value on the request context twice.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // Time the payload wrapping so it can be reported in request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Hook lets the end-to-end timeout policy flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Attaches batch routing and batch semantics headers to the request. Single
// partition-key batches get the partition-key header (resolving PartitionKey.NONE
// against the collection's definition); range batches get a partition-key-range
// identity instead. Atomicity / continue-on-error flags come from the batch itself.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
    ServerBatchRequest serverBatchRequest,
    DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers.
 * Populates the date, authorization, capability and content-type headers, then (for
 * document/conflict feed operations with a feed range) the feed-range filtering
 * headers before finally applying AAD authorization if configured.
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Key/token based auth is computed synchronously here; AAD auth is applied
    // reactively at the end via populateAuthorizationHeader.
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is sent URL-encoded.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }
    this.populateCapabilitiesHeader(request);
    // Default the content type per verb, without clobbering a caller-provided value.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    if (this.requiresFeedRangeFiltering(request)) {
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}
/**
 * Advertises the SDK's supported capabilities to the service, unless the
 * caller already set the header explicitly on this request.
 */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    boolean alreadySet = headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES);
    if (!alreadySet) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Determines whether this request needs feed-range based filtering headers.
 * Only Document/Conflict read-feed and query operations that carry an explicit
 * feed range qualify; everything else is served without feed-range filtering.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeCapableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeCapableResource) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean feedOrQueryOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return feedOrQueryOperation && request.getFeedRange() != null;
}
/**
 * Populates the AUTHORIZATION header on the request for AAD-based auth.
 * For all other token types the header was already applied earlier in the
 * pipeline, so the request is passed through unchanged.
 *
 * @throws IllegalArgumentException if {@code request} is null.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD tokens require an asynchronous token acquisition here.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Populates the AUTHORIZATION header on a raw {@link HttpHeaders} collection
 * for AAD-based auth; other token types leave the headers untouched.
 *
 * @throws IllegalArgumentException if {@code httpHeaders} is null.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/** Returns the authorization token type this client instance was configured with. */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return authorizationTokenType;
}
// Resolves the authorization token for a single request. The branch order is
// deliberate and security-relevant: a custom token resolver wins, then a
// TokenCredential, then a raw resource token, and finally the per-resource
// token map. Do not reorder without reviewing all auth configurations.
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Custom resolver: pass an unmodifiable view of properties so user code cannot mutate them.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token was supplied directly; it is used verbatim.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Database-account reads are authorized with the first token from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps the wire-level {@link ResourceType} onto the public
 * {@link CosmosResourceType}, defaulting to SYSTEM for values that have no
 * public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token returned by the service so subsequent requests on
// this client can honor session consistency.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Executes a create (POST) against the store: populates request headers, then
// dispatches through the resolved store proxy.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
DocumentClientRetryPolicy documentClientRetryPolicy,
OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
// On a retry attempt, close out the retry timing window for diagnostics.
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
// Executes an upsert (POST with IS_UPSERT header): populates headers, marks the
// request as an upsert, dispatches it, and captures the returned session token.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
DocumentClientRetryPolicy documentClientRetryPolicy,
OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
Map<String, String> headers = requestPopulated.getHeaders();
assert (headers != null);
// Distinguishes this POST from a plain create on the service side.
headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
// On a retry attempt, close out the retry timing window for diagnostics.
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
.map(response -> {
// Session consistency: remember the token from the write response.
this.captureSessionToken(requestPopulated, response);
return response;
}
);
});
}
// Executes a replace (PUT) against the store after populating headers.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
return populateHeadersAsync(request, RequestVerb.PUT)
.flatMap(requestPopulated -> {
// On a retry attempt, close out the retry timing window for diagnostics.
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated);
});
}
// Executes a partial-document update (PATCH) against the store after populating headers.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
return populateHeadersAsync(request, RequestVerb.PATCH)
.flatMap(requestPopulated -> {
// On a retry attempt, close out the retry timing window for diagnostics.
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated);
});
}
// Public create entry point: wraps the core create with the cross-region
// availability strategy. Non-idempotent write retries are only enabled when
// the caller opted in via request options.
@Override
public Mono<ResourceResponse<Document>> createDocument(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Create,
(opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
collectionLink,
document,
opt,
disableAutomaticIdGeneration,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up retry policy, scoped diagnostics and the end-to-end timeout for a
// single create attempt, then delegates to createDocumentInternal.
private Mono<ResourceResponse<Document>> createDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
// Without an explicit partition key we may hit a PK mismatch on a stale
// collection cache entry; wrap with the retry policy that refreshes it.
if (nonNullRequestOptions.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(() ->
createDocumentInternal(
collectionLink,
document,
nonNullRequestOptions,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
scopedDiagnosticsFactory),
requestRetryPolicy),
scopedDiagnosticsFactory
);
}
// Builds the create request and dispatches it, mapping the raw service
// response into a typed ResourceResponse. Synchronous failures during request
// construction are converted into an error Mono.
private Mono<ResourceResponse<Document>> createDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy requestRetryPolicy,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
return requestObs
.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Applies the end-to-end operation timeout to a point-operation Mono. A
// negative configured timeout fails immediately; otherwise a reactor timeout
// is attached and timeouts are mapped to OperationCancelledException.
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
RequestOptions requestOptions,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono,
ScopedDiagnosticsFactory scopedDiagnosticsFactory) {
requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
// Ensure at least one diagnostics instance exists so the error carries diagnostics.
CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
if (latestCosmosDiagnosticsSnapshot == null) {
scopedDiagnosticsFactory.createDiagnostics();
}
return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
}
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationExceptionForPointOperations(
scopedDiagnosticsFactory,
throwable,
requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
}
return rxDocumentServiceResponseMono;
}
// Converts a reactor TimeoutException produced by the e2e timeout operator
// into an OperationCancelledException enriched with diagnostics; any other
// throwable is passed through unchanged.
private static Throwable getCancellationExceptionForPointOperations(
ScopedDiagnosticsFactory scopedDiagnosticsFactory,
Throwable throwable,
AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
// Reactor may wrap the timeout; unwrap to inspect the real cause.
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (unwrappedException instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
// Notify the in-flight request (if any) that it was cancelled due to the e2e timeout.
Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
if (actualCallback != null) {
logger.trace("Calling actual Mark E2E timeout callback");
actualCallback.run();
}
// Ensure at least one diagnostics instance exists so the error carries diagnostics.
CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
if (lastDiagnosticsSnapshot == null) {
scopedDiagnosticsFactory.createDiagnostics();
}
BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
return exception;
}
return throwable;
}
// Builds the OperationCancelledException raised when a caller configures a
// negative end-to-end timeout; tags it with the dedicated sub-status code and
// attaches diagnostics when available.
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
checkArgument(
negativeTimeout.isNegative(),
"This exception should only be used for negative timeouts");
String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
CosmosException exception = new OperationCancelledException(message, null);
BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
if (cosmosDiagnostics != null) {
BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
}
return exception;
}
// Public upsert entry point: wraps the core upsert with the cross-region
// availability strategy; write retries only when the caller opted in.
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Upsert,
(opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up retry policy, scoped diagnostics and the end-to-end timeout for a
// single upsert attempt, then delegates to upsertDocumentInternal.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
// Without an explicit partition key, guard against stale collection cache entries.
if (nonNullRequestOptions.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> upsertDocumentInternal(
collectionLink,
document,
nonNullRequestOptions,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
scopedDiagnosticsFactory),
finalRetryPolicyInstance),
scopedDiagnosticsFactory
);
}
// Builds the upsert request and dispatches it, mapping the raw service
// response into a typed ResourceResponse. Synchronous failures during request
// construction are converted into an error Mono.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs =
getCreateDocumentRequest(
retryPolicyInstance,
collectionLink,
document,
options,
disableAutomaticIdGeneration,
OperationType.Upsert,
clientContextOverride);
return reqObs
.flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public replace-by-link entry point: wraps the core replace with the
// cross-region availability strategy; write retries only when opted in.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Replace,
(opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
documentLink,
document,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up retry policy, scoped diagnostics and the end-to-end timeout for a
// single replace-by-link attempt, then delegates to replaceDocumentInternal.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
String documentLink,
Object document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
// Without an explicit partition key, guard against stale collection cache
// entries by deriving the collection link from the document link.
if (nonNullRequestOptions.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
documentLink,
document,
nonNullRequestOptions,
finalRequestRetryPolicy,
endToEndPolicyConfig,
scopedDiagnosticsFactory),
requestRetryPolicy),
scopedDiagnosticsFactory
);
}
// Validates the inputs for a replace-by-link, converts the user-supplied
// object into a typed Document, and forwards to the Document-typed overload.
// Synchronous failures are converted into an error Mono.
//
// Fix: the debug log now passes the exception object as the final argument so
// SLF4J records the stack trace, matching createDocumentInternal /
// upsertDocumentInternal in this file.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Object document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Normalize the arbitrary user object into the internal Document representation.
Document typedDocument = documentFromObject(document, mapper);
return this.replaceDocumentInternal(
documentLink,
typedDocument,
options,
retryPolicyInstance,
clientContextOverride);
} catch (Exception e) {
logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public replace-by-Document entry point: the document's self link identifies
// the target; wraps the core replace with the availability strategy.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Replace,
(opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
document,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Replace-by-Document variant: retry policy setup and delegation to
// replaceDocumentInternal. NOTE(review): unlike the link-based overload, this
// path passes the possibly-null `options` straight into
// PartitionKeyMismatchRetryPolicy and applies no e2e timeout wrapper here —
// confirm whether that asymmetry is intentional.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
Document document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
// The document's self link locates its collection for cache refresh on PK mismatch.
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
// Validates the Document and forwards to the link-based overload, using the
// document's self link as the target. Synchronous failures are converted into
// an error Mono.
//
// Fixes: the failure log said "replacing a database" — a copy-paste defect in
// a document-replace path — and dropped the exception object; it now says
// "document" and passes `e` so SLF4J records the stack trace, matching the
// sibling *Internal methods in this file.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (document == null) {
throw new IllegalArgumentException("document");
}
return this.replaceDocumentInternal(
document.getSelfLink(),
document,
options,
retryPolicyInstance,
clientContextOverride);
} catch (Exception e) {
logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Core replace implementation: serializes the document (recording
// serialization diagnostics), builds the PUT request, resolves the collection,
// attaches partition-key information, and dispatches the replace.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Serialization is timed so it can be surfaced in diagnostics below.
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
// Stamp the tracking id into the payload before serialization so the
// service persists it with the replaced document.
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
// Hook lets the e2e-timeout path mark this request as cancelled-on-timeout.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda uses the outer `request`, not `req` — presumably
// addPartitionKeyInformation mutates and returns the same instance; confirm.
return requestObs
.flatMap(req -> replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Resolves the effective end-to-end latency policy for the given request
 * options, falling back to the client-level default when none is supplied.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/** Prefers the supplied policy; falls back to the client-level default when null. */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
// Public patch entry point: wraps the core patch with the cross-region
// availability strategy; write retries only when the caller opted in.
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Patch,
(opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
documentLink,
cosmosPatchOperations,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up retry policy, scoped diagnostics and the end-to-end timeout for a
// single patch attempt, then delegates to patchDocumentInternal.
private Mono<ResourceResponse<Document>> patchDocumentCore(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> patchDocumentInternal(
documentLink,
cosmosPatchOperations,
nonNullRequestOptions,
documentClientRetryPolicy,
scopedDiagnosticsFactory),
documentClientRetryPolicy),
scopedDiagnosticsFactory
);
}
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
return requestObs
.flatMap(req -> patch(request, retryPolicyInstance))
.map(resp -> toResourceResponse(resp, Document.class));
}
// Public delete-by-link entry point (no payload hint): wraps the core delete
// with the cross-region availability strategy.
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Delete,
(opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
documentLink,
null,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Delete overload that carries the item payload (used to derive the partition
// key); wraps the core delete with the availability strategy.
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Delete,
(opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
documentLink,
internalObjectNode,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up retry policy, scoped diagnostics and the end-to-end timeout for a
// single delete attempt, then delegates to deleteDocumentInternal.
private Mono<ResourceResponse<Document>> deleteDocumentCore(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> deleteDocumentInternal(
documentLink,
internalObjectNode,
nonNullRequestOptions,
requestRetryPolicy,
scopedDiagnosticsFactory),
requestRetryPolicy),
scopedDiagnosticsFactory
);
}
// Core delete implementation: builds the DELETE request, resolves the
// collection, attaches partition-key information (using the optional payload
// to derive the key), and dispatches the delete. Synchronous failures are
// converted into an error Mono.
//
// Fix: the failure log now passes the exception object as the final argument
// so SLF4J records the stack trace, matching createDocumentInternal /
// upsertDocumentInternal in this file.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
// Hook lets the e2e-timeout path mark this request as cancelled-on-timeout.
options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
() -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
return requestObs
.flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point for bulk delete-by-partition-key; no availability-strategy
// wrapper here, only session-token-reset retries.
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
requestRetryPolicy);
}
// Core bulk delete-by-partition-key: builds a DELETE against the PartitionKey
// resource, resolves the collection, attaches partition-key information, and
// dispatches. Synchronous failures are converted into an error Mono.
//
// Fix: the failure log now passes the exception object as the final argument
// so SLF4J records the stack trace, matching the sibling *Internal methods in
// this file.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting documents due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public read entry point: delegates to the private variant using this client
// as the inner diagnostics factory.
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
return readDocument(documentLink, options, this);
}
// Private read variant: wraps the core read with the availability strategy.
// Reads never enable non-idempotent write retries (hence the hard-coded false).
private Mono<ResourceResponse<Document>> readDocument(
String documentLink,
RequestOptions options,
DiagnosticsClientContext innerDiagnosticsFactory) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Read,
(opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
options,
false,
innerDiagnosticsFactory
);
}
// Sets up retry policy, scoped diagnostics and the end-to-end timeout for a
// single read attempt, then delegates to readDocumentInternal. Always passes
// a non-null RequestOptions downstream.
private Mono<ResourceResponse<Document>> readDocumentCore(
String documentLink,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
return getPointOperationResponseMonoWithE2ETimeout(
nonNullRequestOptions,
endToEndPolicyConfig,
ObservableHelper.inlineIfPossibleAsObs(
() -> readDocumentInternal(
documentLink,
nonNullRequestOptions,
retryPolicyInstance,
scopedDiagnosticsFactory),
retryPolicyInstance),
scopedDiagnosticsFactory
);
}
/**
 * Builds and issues the service request for a point read of a document.
 *
 * @param documentLink link of the document to read; must not be null or empty.
 * @param options request options; expected non-null (the core path substitutes a default).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @param clientContextOverride diagnostics client context for this operation.
 * @return a {@code Mono} emitting the document read response, or an error.
 * @throws IllegalArgumentException (emitted via {@code Mono.error}) when documentLink is empty.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Let the end-to-end timeout policy mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            this.collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs =
            addPartitionKeyInformation(request, null, null, options, collectionObs);
        // Fix: issue the read against the request emitted by addPartitionKeyInformation.
        // Previously the captured 'request' was used and the lambda parameter ignored;
        // relying on the two being the same object is fragile and inconsistent with the
        // sibling operations (e.g. the delete path), which use the emitted request.
        return requestObs.flatMap(req ->
            this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection. Implemented as a full-scan SQL query.
 *
 * @param collectionLink link of the collection to read from; must not be null or empty.
 * @param state per-operation feed state (options, diagnostics).
 * @param classOfT type the documents are deserialized into.
 * @return a {@code Flux} of feed pages.
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // "Read all" is just a scan query over the collection.
    return this.queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs from one container.
 * Items that are the sole occupant of a partition-key range are fetched via point reads;
 * ranges holding multiple identities are fetched via per-range SQL queries. The results
 * of both paths are merged into a single aggregated {@link FeedResponse}.
 *
 * @param itemIdentityList identities (id + partition key) of the items to read.
 * @param collectionLink link of the container holding the items.
 * @param state per-operation feed state (query options, diagnostics wiring).
 * @param klass type the items are deserialized into.
 * @return a {@code Mono} emitting one aggregated feed response containing all found items.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
// Diagnostics produced by the fan-out below are funneled into one scoped factory
// and merged back into the operation state's context when the operation completes.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
// Throw-away query request used only to resolve the collection metadata.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Routing map is needed to group identities by the partition-key range that owns them.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Bucket each requested identity under the range owning its effective partition key.
itemIdentityList
.forEach(itemIdentity -> {
// For hierarchical (multi-hash) keys every path component must be supplied.
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Ranges with >1 identity get a SQL query; single-identity ranges are point-read below.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge both paths, then collapse every page into a single synthetic feed response.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
// RU charges are summed across every page/point read.
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
// Records the whole readMany as one 200 operation with the summed charge.
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
.onErrorMap(throwable -> {
// On CosmosException, record the failure into the diagnostics context before rethrowing.
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds, per partition-key range, the SQL query that fetches the items mapped to that
 * range. Ranges holding exactly one identity are intentionally skipped — those are
 * served via point reads instead of queries.
 *
 * @param partitionRangeItemKeyMap identities grouped by owning partition-key range.
 * @param partitionKeyDefinition the container's partition-key definition.
 * @return map from range to the query spec covering that range's identities.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        final List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges are read directly, not queried
        }
        final SqlQuerySpec querySpec;
        if ("[\"id\"]".equals(pkSelector)) {
            // Partition key is the id itself; matching on id alone suffices.
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            querySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(entry.getKey(), querySpec);
    }
    return queriesByRange;
}
/**
 * Builds {@code SELECT * FROM c WHERE c.id IN ( @param0, ... )} for containers whose
 * partition key is the id property, so matching on id alone is sufficient.
 * The {@code partitionKeySelector} argument is unused here; it is kept for signature
 * parity with the sibling query builders.
 *
 * @param idPartitionKeyPairList identities whose ids become the IN-list parameters.
 * @param partitionKeySelector unused.
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        if (i > 0) {
            query.append(", ");
        }
        final String paramName = "@param" + i;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(i).getId()));
        query.append(paramName);
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the general read-many query for single-path partition keys:
 * {@code SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @q) OR ... )}.
 * Parameters are numbered pairwise: even index = partition key, odd index = id.
 *
 * @param itemIdentities identities (id + partition key) to match.
 * @param partitionKeySelector bracketed selector for the partition-key property, e.g. ["pk"].
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        if (i > 0) {
            query.append(" OR ");
        }
        final CosmosItemIdentity identity = itemIdentities.get(i);
        final String pkParamName = "@param" + (2 * i);
        final String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(")
             .append("c.id = ")
             .append(idParamName)
             .append(" AND ")
             .append(" c")
             .append(partitionKeySelector)
             .append(" = ")
             .append(pkParamName)
             .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the read-many query for hierarchical (multi-hash) partition keys:
 * every sub-path of the key becomes its own equality predicate alongside the id match.
 * Parameter numbering is a single running counter across all identities.
 *
 * @param itemIdentities identities (id + hierarchical partition key) to match.
 * @param partitionKeyDefinition the container's partition-key definition (one path per level).
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
List<CosmosItemIdentity> itemIdentities,
PartitionKeyDefinition partitionKeyDefinition) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
// Running parameter index shared by all pk components and ids across identities.
int paramCount = 0;
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
// NOTE(review): assumes the partition-key object is a String whose components are
// joined with '=' — values containing '=' would split incorrectly. Confirm this
// serialization convention before relying on it elsewhere.
String pkValueString = (String) pkValue;
List<List<String>> partitionKeyParams = new ArrayList<>();
List<String> paths = partitionKeyDefinition.getPaths();
int pathCount = 0;
for (String subPartitionKey: pkValueString.split("=")) {
String pkParamName = "@param" + paramCount;
// Pair each definition path with the parameter bound to its component value.
partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
parameters.add(new SqlParameter(pkParamName, subPartitionKey));
paramCount++;
pathCount++;
}
String idValue = itemIdentity.getId();
String idParamName = "@param" + paramCount;
paramCount++;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
for (List<String> pkParam: partitionKeyParams) {
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c.");
// substring(1) drops the leading '/' of the definition path.
queryStringBuilder.append(pkParam.get(0).substring(1));
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParam.get(1));
}
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds the bracketed selector used to address the partition-key property in SQL text,
 * e.g. a definition path {@code /a/b} yields {@code ["a"]["b"]} (paths are concatenated).
 *
 * @param partitionKeyDefinition the container's partition-key definition.
 * @return concatenated bracket selector for all definition paths.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
.map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
// NOTE(review): this replaces '"' with a single backslash rather than an escaped
// quote (\") — looks suspicious for property names containing quotes; confirm the
// intended escaping before changing, since callers compare against the literal result.
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
/**
 * Executes the per-range SQL queries produced for a readMany call.
 * Returns an empty flux when no range needs a query (all identities were point-read).
 * When an end-to-end latency policy is enabled, the result flux is wrapped with the
 * corresponding timeout handling.
 *
 * @param diagnosticsFactory scoped diagnostics sink shared with the readMany operation.
 * @param parentResourceLink query link of the parent container.
 * @param sqlQuery placeholder query spec handed to the execution-context factory
 *                 (per-range specs come from {@code rangeQueryMap}).
 * @param options query request options; may be null.
 * @param klass resource type of the returned documents.
 * @param resourceTypeEnum resource type being queried.
 * @param collection resolved container metadata.
 * @param rangeQueryMap per-range query specs; empty map short-circuits to an empty flux.
 * @return feed pages from all range queries.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
if (rangeQueryMap.isEmpty()) {
return Flux.empty();
}
UUID activityId = randomUuid();
// Shared flag the timeout wrapper uses to mark the query as cancelled.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
diagnosticsFactory,
queryClient,
collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum,
isQueryCancelledOnTimeout);
Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
// Only wrap with timeout handling when an e2e policy is configured and enabled.
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsFactory);
}
return feedResponseFlux;
}
/**
 * Issues point reads for every partition-key range that holds exactly one requested
 * identity, and adapts each result into a single-item {@link FeedResponse} so it can be
 * merged with the query results of the readMany fan-out.
 * A 404 with UNKNOWN sub-status (item simply absent) is converted into an empty feed
 * page instead of an error; other failures propagate.
 *
 * @param diagnosticsFactory scoped diagnostics sink shared with the readMany operation.
 * @param singleItemPartitionRequestMap identities grouped by owning range; only
 *                                      single-element groups are read here.
 * @param resourceLink link prefix the item id is appended to.
 * @param queryRequestOptions options converted to per-read request options.
 * @param klass type the read item is deserialized into.
 * @return feed pages, one per point read (empty page for not-found items).
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
// Multi-item groups are handled by queryForReadMany; skip them here.
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
// Plain "not found" is not an error for readMany — carry the exception
// through the pair so an empty page can be emitted downstream.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// Not-found: emit an empty page but keep the request statistics for diagnostics.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
/**
 * Queries documents with a raw SQL string by delegating to the {@link SqlQuerySpec} overload.
 *
 * @param collectionLink link of the collection to query.
 * @param query SQL query text.
 * @param state per-operation feed state.
 * @param classOfT type the results are deserialized into.
 * @return a {@code Flux} of feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    return this.queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}
/**
 * Creates the {@link IDocumentQueryClient} facade the query pipeline uses; it delegates
 * caches, retry policy, and consistency levels to this client instance. When an
 * operation context/listener tuple is supplied, query requests/responses/errors are
 * reported to the listener and tagged with the correlated activity id.
 *
 * @param rxDocumentClientImpl unused directly; the anonymous class captures the
 *                             enclosing instance instead.
 * @param operationContextAndListenerTuple optional listener wiring; may be null.
 * @return a query-client facade bound to this document client.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// Without a listener the query is issued directly; with one, request/response/error
// callbacks are emitted and the correlated activity id header is attached.
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// Not supported through this facade; the query pipeline does not use read-feed here.
return null;
}
};
}
/**
 * Queries documents with a parameterized query spec.
 *
 * @param collectionLink link of the collection to query.
 * @param querySpec parameterized SQL query.
 * @param state per-operation feed state.
 * @param classOfT type the results are deserialized into.
 * @return a {@code Flux} of feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    // Trace the query text before handing it to the query pipeline.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return this.createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Queries the change feed of a collection.
 *
 * @param collection resolved container metadata; must not be null.
 * @param changeFeedOptions change-feed request options (start position, mode, ...).
 * @param classOfT type the change-feed items are deserialized into.
 * @return a {@code Flux} of change-feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    // Paging and continuation handling live in the change-feed query implementation.
    ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Change-feed entry point used by the paged-flux surface; unwraps the options from the
 * operation state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    return this.queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}
/**
 * Reads all documents of one logical partition by running a partition-scoped scan query
 * against the single physical partition-key range that owns the given partition key.
 * Diagnostics handling differs depending on whether a cross-region availability
 * strategy applies (two or more applicable regions).
 *
 * @param collectionLink link of the collection; must not be null or empty.
 * @param partitionKey logical partition to scan; must not be null.
 * @param state per-operation feed state (options, diagnostics wiring).
 * @param classOfT type the documents are deserialized into.
 * @return a {@code Flux} of feed pages for the partition.
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the options so per-operation mutations (partition-key-range id) do not leak.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
// With fewer than two applicable regions there is no speculation, so the factory is
// never reset; otherwise each attempt resets it before merging.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx));
} else {
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
}
// Throw-away query request used only to resolve the collection metadata.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query filtered to the logical partition via the pk selector.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries the lookup/query when the physical partition was split or migrated.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Resolve the one physical range owning this logical partition and pin
// the query to it via the partition-key-range id.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
diagnosticsFactory,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculative execution: fold diagnostics back into the request options on every
// terminal path (next page, error, cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
/**
 * Exposes the client-wide cache of partitioned query execution plans.
 *
 * @return the query-plan cache keyed by query text.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return this.queryPlanCache;
}
/**
 * Reads the partition-key ranges of a collection as a feed (state-based overload).
 *
 * @param collectionLink link of the collection; must not be null or empty.
 * @param state per-operation feed state.
 * @return a {@code Flux} of partition-key-range feed pages.
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Reads the partition-key ranges of a collection as a feed (options-based overload).
 *
 * @param collectionLink link of the collection; must not be null or empty.
 * @param options query request options.
 * @return a {@code Flux} of partition-key-range feed pages.
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds the service request for a stored-procedure operation under a collection.
 *
 * @param collectionLink link of the owning collection; must not be null or empty.
 * @param storedProcedure the stored procedure payload; must not be null.
 * @param options request options; may be null.
 * @param operationType the operation being performed (create, upsert, ...).
 * @return the populated service request.
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Builds the service request for a user-defined-function operation under a collection.
 *
 * @param collectionLink link of the owning collection; must not be null or empty.
 * @param udf the user-defined function payload; must not be null.
 * @param options request options; may be null.
 * @param operationType the operation being performed.
 * @return the populated service request.
 * @throws IllegalArgumentException when collectionLink is empty or udf is null.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure in a collection, with session-token-reset retries applied.
 *
 * @param collectionLink link of the owning collection.
 * @param storedProcedure the stored procedure to create.
 * @param options request options; may be null.
 * @return a {@code Mono} emitting the created stored procedure's response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the create request for a stored procedure.
 *
 * @param collectionLink link of the owning collection.
 * @param storedProcedure the stored procedure to create.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a {@code Mono} emitting the created stored procedure's response, or an error.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure, with session-token-reset retries applied.
 *
 * @param storedProcedure the replacement stored procedure (self-link identifies the target).
 * @param options request options; may be null.
 * @return a {@code Mono} emitting the replaced stored procedure's response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the replace request for a stored procedure.
 *
 * @param storedProcedure the replacement stored procedure; must not be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a {@code Mono} emitting the replaced stored procedure's response, or an error.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers =
            getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure, with session-token-reset retries applied.
 *
 * @param storedProcedureLink link of the stored procedure to delete.
 * @param options request options; may be null.
 * @return a {@code Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the delete request for a stored procedure.
 *
 * @param storedProcedureLink link of the stored procedure; must not be null or empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a {@code Mono} emitting the delete response, or an error.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure, with session-token-reset retries applied.
 *
 * @param storedProcedureLink link of the stored procedure to read.
 * @param options request options; may be null.
 * @return a {@code Mono} emitting the stored procedure's response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
QueryFeedOperationState state) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
}
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
QueryFeedOperationState state) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
String query,
QueryFeedOperationState state) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Builds and dispatches the READ request for a conflict. Partition-key
     * information is resolved asynchronously before the request is sent; any
     * synchronous failure is converted into an error Mono.
     */
    private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // NOTE(review): the lambda ignores `req` and uses the captured outer `request`
            // — presumably addPartitionKeyInformation mutates and re-emits the same
            // instance; confirm, and consider using `req` for clarity.
            return reqObs.flatMap(req -> {
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
QueryFeedOperationState state) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Builds and dispatches the DELETE request for a conflict. Partition-key
     * information is resolved asynchronously before the request is sent; any
     * synchronous failure is converted into an error Mono.
     */
    private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // NOTE(review): the lambda ignores `req` and uses the captured outer `request`
            // — presumably addPartitionKeyInformation mutates and re-emits the same
            // instance; confirm, and consider using `req` for clarity.
            return reqObs.flatMap(req -> {
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
// Creates a client encryption key under the given database; request construction
// (validation, path, headers) is delegated to getClientEncryptionKeyRequest.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a client-encryption-key operation scoped under
 * {@code databaseLink}. Validates arguments and the resource before building.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    final String targetPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, targetPath, clientEncryptionKey, headers, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink,
    RequestOptions options) {
    // Replace runs under a session-token-reset retry policy, inlined when possible.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
// Replaces a client encryption key addressed by its name-based link.
// Validates the resource, builds the Replace request, notifies the retry
// policy, and maps the response; synchronous failures become Mono.error.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
// Let the retry policy capture per-request state before the first attempt.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    // Client encryption keys are enumerated as a non-document feed under the database.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedPath);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // Delegate to the shared query pipeline, typed for client encryption keys.
    return createQuery(
        databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
// Fix: reuse the SAME retry-policy instance for both the operation callback and
// the retry driver. Previously a second, independent policy instance was passed
// to inlineIfPossibleAsObs, so the instance observing the request (inside
// createPermissionInternal) was not the one deciding retries — inconsistent
// with every sibling method (upsertPermission, replacePermission, ...).
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Creates a permission under the given user; request construction (validation,
// path, headers) is delegated to getPermissionRequest.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    // Upsert runs under a session-token-reset retry policy, inlined when possible.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Upserts a permission under the given user. The retry policy is notified
// before the first attempt so it can capture per-request state.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds the service request for a permission operation scoped under
 * {@code userLink}. Validates arguments and the resource before building.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String targetPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, targetPath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Replace runs under a session-token-reset retry policy, inlined when possible.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
// Replaces a permission addressed by its self link. Validates the resource,
// builds the Replace request, notifies the retry policy, and maps the response.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Delete runs under a session-token-reset retry policy, inlined when possible.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a permission addressed by its link. Validates the link, builds the
// Delete request, notifies the retry policy, and maps the response.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Read runs under a session-token-reset retry policy, inlined when possible.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Reads a permission addressed by its link. Validates the link, builds the
// Read request, notifies the retry policy, and maps the response.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    // Permissions are enumerated as a non-document feed scoped under the user.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String feedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, feedPath);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryPermissions(userLink, spec, state);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // Delegate to the shared query pipeline, typed for permissions.
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Offer replacement runs under a session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
// Replaces an offer (throughput settings) addressed by its self link.
// Offers take no request options/headers, hence the null arguments.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Offer reads run under a session-token-reset retry policy, inlined when possible.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Reads an offer addressed by its link. Offers take no custom headers; the
// HashMap cast disambiguates the overloaded create(...) factory.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    // Offers are account-scoped; enumerate them as a non-document feed.
    final String feedPath = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, feedPath);
}
/**
 * Reads a non-document feed using the query options carried by the operation state.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
/**
 * Reads a non-document feed under a session-token-reset retry policy; the same
 * policy instance drives retries and observes each paged request.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final DocumentClientRetryPolicy feedRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, feedRetryPolicy),
        feedRetryPolicy);
}
// Core ReadFeed pagination for non-document resources: builds one request per
// page (carrying the continuation token and page size), executes it, and maps
// each wire response into a typed FeedResponse via the Paginator.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 means "let the service choose the page size".
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
assert(resourceType != ResourceType.Document);
// Factory invoked by the Paginator for every page; continuationToken is null
// for the first page.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Executes one page request and converts the raw response to a typed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, state);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Offers are account-scoped, so there is no parent resource link (null).
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Account metadata reads also run under the session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
// Reads the account metadata from the service root (empty resource path) and
// converts the raw response into a DatabaseAccount.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// The HashMap cast disambiguates the overloaded create(...) factory.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Exposes the session-token container; typed as Object to satisfy the interface.
public Object getSession() {
return this.sessionContainer;
}
// Replaces the session-token container; callers must pass a SessionContainer
// (the unchecked cast will throw ClassCastException otherwise).
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Returns the client's collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
// Returns the client's partition-key-range cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Returns the manager that tracks regional service endpoints.
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
// Builds a new AddressSelector on each call, pairing the client's address
// resolver with the configured transport protocol.
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the database-account metadata from one specific regional endpoint
// (bypassing endpoint selection) and, on success, refreshes the cached
// multi-write flag from the returned account properties.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
// Deferred so a fresh request is built per subscription (e.g. on retry).
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Force the request to the caller-specified endpoint.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
// Multi-write is effective only when both the client policy and
// the account enable it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Routes a request to either the gateway proxy or the direct store model.
 * Certain requests must go through the gateway even when the client
 * connectivity mode is direct: explicit gateway-mode requests, master-resource
 * metadata operations (offers, client encryption keys, partition key ranges,
 * non-execute scripts, partition-key delete), create/upsert/delete of
 * databases/users/collections/permissions, collection replace/read, and
 * cross-partition queries that carry neither a partition key range identity
 * nor a partition key header (the gateway fans those out).
 *
 * @param request the request to route
 * @return the RxStoreModel (gateway proxy or direct store model) to use
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit opt-in always wins.
if (request.useGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Resource types that are always gateway-served.
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries/read-feeds over collection children without partition targeting
// go through the gateway, which handles the cross-partition fan-out.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
// Idempotent shutdown: the closed flag guards against double-close, and each
// subsystem is released quietly so one failure does not abort the rest.
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
// The throughput control store only exists if it was ever enabled.
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
// Returns the deserializer used to materialize items from wire payloads.
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
// Registers a throughput control group. On first use the control store is
// lazily created and wired into the active transport (direct store model or
// gateway proxy); the compareAndSet plus 'synchronized' ensure this happens
// exactly once.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up and cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Returns the account's default consistency level as reported by the gateway
// configuration reader.
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * The provider is wired into the direct-mode transport and address resolver
 * only when the client runs in DIRECT mode; the gateway proxy is always wired
 * so gateway-routed requests can be fault-injected in either mode.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Forwards the "warm-up completed" signal for the given containers to the store model.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
// Forwards the "warm-up started" signal for the given containers to the store model.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Returns the credential this client authenticates with (master key or resource token).
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans one logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 * The partition key value is always passed as a parameter, never inlined.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final String queryText =
        "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the feed ranges (one per physical partition) of a collection.
// Runs under an InvalidPartitionException retry policy so a stale collection
// cache triggers re-resolution rather than surfacing the transient error.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
// A Query/Document request is used purely as the vehicle for collection
// resolution; it is never sent to the service here.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink, forceRefresh),
invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection, then asks the partition-key-range cache for all
// ranges overlapping the full key space and converts them to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(
RxDocumentServiceRequest request,
String collectionLink,
boolean forceRefresh) {
logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// Query all ranges covering the entire partition key space.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
forceRefresh,
null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts resolved partition key ranges into feed ranges. A null range list
 * means the name cache is stale: flag a refresh on the request and throw
 * InvalidPartitionException so the enclosing retry policy re-resolves.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> pkRanges = partitionKeyRangeListValueHolder.v;
    if (pkRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : pkRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range as a feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 * Version and variant bits are stamped by the two-argument overload.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
}
/**
 * Stamps RFC 4122 version-4 / IETF-variant bits onto two random longs and
 * returns the resulting UUID. Package-visible for deterministic testing.
 */
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble of the high half, then set it to 0b0100 (version 4).
    final long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the two top bits of the low half, then set them to 0b10 (IETF variant).
    final long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: applies the availability (hedging) strategy using this
// client as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
// Executes a document point operation with threshold-based cross-region
// hedging. The first applicable region starts immediately; each subsequent
// region starts after a staggered delay (threshold + step * position) with all
// other applicable regions excluded, so each speculative attempt targets
// exactly one region. The first NON-TRANSIENT result (success or terminal
// error) wins; transient errors let the remaining attempts keep racing.
// Diagnostics from all attempts are merged into the caller-visible options.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two applicable regions: no hedging, execute directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Scoped factory collects diagnostics from every speculative attempt.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First attempt: unrestricted (may retry across all regions), and
// ANY CosmosException from it counts as a candidate result.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: exclude all other applicable regions so the
// attempt is pinned to this one region; only NON-TRANSIENT
// CosmosExceptions are captured as candidate results.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold for the first hedge, + one step per later hedge.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First emitted value wins; losers are cancelled by firstWithValue.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no source emitted
// a value; dig out the first real CosmosException to surface instead.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// Predicate used with onErrorResume: matches any error whose unwrapped cause is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// Predicate used with onErrorResume: matches only CosmosExceptions whose status/sub-status code
// combination is classified as non-transient (see isNonTransientResultForHedging).
private static boolean isNonTransientCosmosException(Throwable t) {
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
// Builds the exclusion list for one hedged attempt: on top of any caller-requested exclusions,
// every applicable region EXCEPT the one this attempt targets is excluded, pinning the attempt
// to 'currentRegion'.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();

    for (String candidateRegion : applicableRegions) {
        if (!candidateRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(candidateRegion);
        }
    }

    return effectiveExcludedRegions;
}
// Classifies a status/sub-status pair as a terminal ("non-transient") outcome for hedging:
// such a result should win the hedging race instead of letting other regional attempts continue.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any success response (< 400) is final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }

    // A client-side operation timeout (408 / client-operation-timeout) is treated as terminal.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
        && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {

        return true;
    }

    // 404 with sub-status 0 (plain "not found") is a definite outcome as well.
    if (statusCode == HttpConstants.StatusCodes.NOTFOUND
        && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {

        return true;
    }

    // These client errors are deterministic - retrying in another region cannot change them.
    return statusCode == HttpConstants.StatusCodes.BADREQUEST
        || statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
        || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
        || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
        || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
}
// Prefers the caller-provided diagnostics client context; falls back to this client itself.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints for the given operation type, ordered by the preference
 * list (if any), with null entries removed.
 * @param operationType the operation type - decides between read and write endpoints
 * @param excludedRegions regions to exclude when resolving the applicable endpoints
 * @return the applicable endpoints ordered by preference list, or an empty list when the
 *         operation type is neither read-only nor a write operation
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    }

    if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }

    return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes all null entries from the given endpoint list in place.
 * @param orderedEffectiveEndpointsList the list to scrub; may be null
 * @return the same (mutated) list instance, or the shared empty list when the input is null
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }

    // Idiomatic replacement for the former manual index-juggling while/remove(i) loop:
    // removeIf drops every null entry in a single pass with identical in-place semantics.
    orderedEffectiveEndpointsList.removeIf(uri -> uri == null);

    return orderedEffectiveEndpointsList;
}
/**
 * Convenience overload of {@link #getApplicableRegionsForSpeculation(CosmosEndToEndOperationLatencyPolicyConfig,
 * ResourceType, OperationType, boolean, List)} that pulls the excluded regions out of the
 * supplied request options.
 * @param endToEndPolicyConfig the effective end-to-end latency policy (may be null/disabled)
 * @param resourceType the resource type of the operation
 * @param operationType the operation type
 * @param isIdempotentWriteRetriesEnabled whether write retries are safe to hedge
 * @param options the request options providing the excluded-regions list
 * @return the ordered list of regions eligible for speculative (hedged) execution
 */
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {
    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
/**
 * Computes the ordered list of regions eligible for speculative (hedged) execution.
 * Returns an empty list whenever hedging does not apply.
 * @param endToEndPolicyConfig the effective end-to-end latency policy (may be null/disabled)
 * @param resourceType the resource type - hedging only applies to Document operations
 * @param operationType the operation type
 * @param isIdempotentWriteRetriesEnabled whether write retries are safe to hedge
 * @param excludedRegions regions the caller wants excluded; compared case-insensitively
 * @return the applicable region names in endpoint-preference order
 */
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {

    // Hedging requires an enabled end-to-end policy with a threshold-based availability
    // strategy and a Document operation; writes additionally need idempotent retries enabled
    // and an account allowing multiple write locations. (Short-circuit order matches the
    // original guard-clause chain.)
    if (endToEndPolicyConfig == null
        || !endToEndPolicyConfig.isEnabled()
        || resourceType != ResourceType.Document
        || (operationType.isWriteOperation()
            && (!isIdempotentWriteRetriesEnabled
                || !this.globalEndpointManager.canUseMultipleWriteLocations()))
        || !(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {

        return EMPTY_REGION_LIST;
    }

    // Normalize caller exclusions to lower case for case-insensitive comparison.
    HashSet<String> normalizedExcludedRegions = new HashSet<>();
    if (excludedRegions != null) {
        for (String excludedRegion : excludedRegions) {
            normalizedExcludedRegions.add(excludedRegion.toLowerCase(Locale.ROOT));
        }
    }

    List<String> orderedRegionsForSpeculation = new ArrayList<>();
    for (URI endpoint : getApplicableEndPoints(operationType, excludedRegions)) {
        String regionName = this.globalEndpointManager.getRegionName(endpoint, operationType);
        if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
            orderedRegionsForSpeculation.add(regionName);
        }
    }

    return orderedRegionsForSpeculation;
}
/**
 * Runs a feed operation under the threshold-based availability (hedging) strategy.
 * <p>
 * When fewer than two regions are applicable, the operation is executed directly. Otherwise
 * one Mono is created per applicable region: the first attempt may retry across all regions,
 * while each subsequent attempt is pinned to its region (all other applicable regions are
 * excluded) and its subscription is delayed by threshold + (attemptIndex - 1) * thresholdStep.
 * The first Mono to emit a value - a success or a non-transient error wrapped in
 * {@code NonTransientFeedOperationResult} - determines the outcome.
 *
 * @param resourceType the resource type; asserted to be Document
 * @param operationType the feed operation type
 * @param retryPolicyFactory factory producing a fresh retry policy per attempt
 * @param req the service request; cloned for every regional attempt
 * @param feedOperation the actual operation executed per attempt
 * @param <T> the response type of the feed operation
 * @return a Mono emitting the winning response, or the mapped terminal error
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
    final ResourceType resourceType,
    final OperationType operationType,
    final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
    final RxDocumentServiceRequest req,
    final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
    checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
    checkNotNull(req, "Argument 'req' must not be null.");
    assert(resourceType == ResourceType.Document);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        this.getEffectiveEndToEndOperationLatencyPolicyConfig(
            req.requestContext.getEndToEndOperationLatencyPolicyConfig());
    List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
    List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        false,
        initialExcludedRegions
    );
    // Hedging requires at least two applicable regions - otherwise just run the operation.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return feedOperation.apply(retryPolicyFactory, req);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RxDocumentServiceRequest clonedRequest = req.clone();
            if (monoList.isEmpty()) {
                // First attempt: may retry across all regions. ANY CosmosException is treated
                // as a value here so it can terminate this Mono (a hedged attempt may still
                // win the race with a success).
                Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin it to 'region' by excluding every other applicable region.
                clonedRequest.requestContext.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        initialExcludedRegions,
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                // Only NON-transient errors become values for hedged attempts; transient errors
                // keep this Mono pending so another attempt can win the firstWithValue race.
                Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger the subscription: threshold + (attemptIndex - 1) * thresholdStep.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    // First Mono to produce a value wins; unwrap the wrapper back into a response or an error.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);
            // firstWithValue signals NoSuchElementException when no source emitted a value;
            // surface the first inner CosmosException instead when one exists.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        return Utils.as(innerException, CosmosException.class);
                    } else if (innerException instanceof NoSuchElementException) {
                        // An attempt cancelled by the race completes empty - expected, trace only.
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            return exception;
        });
}
/**
 * Callback abstraction for a single document point operation so the availability-strategy
 * machinery can re-invoke the same operation once per hedged region with region-specific
 * request options and a scoped diagnostics context.
 */
@FunctionalInterface
private interface DocumentPointOperation {
    Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Terminal (non-transient) outcome of a hedged point operation. Exactly one of the two fields
// is non-null: 'response' for a success, 'exception' for a non-retriable failure.
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    // True when this result wraps a non-transient error rather than a response.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
// Terminal (non-transient) outcome of a hedged feed operation. Exactly one of the two fields
// is non-null: 'response' for a success, 'exception' for a non-retriable failure.
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    // True when this result wraps a non-transient error rather than a response.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
/**
 * A {@link DiagnosticsClientContext} decorator that records every {@link CosmosDiagnostics}
 * instance it creates so that, once the hedging race has a winner, all captured diagnostics
 * can be merged (at most once) into the winning operation's diagnostics context.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Guards merge() so the captured diagnostics are folded into a context at most once,
    // even when merge is triggered from multiple reactive signals.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;
    private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Delegate creation but remember the instance so merge() can pick it up later.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        mostRecentlyCreatedDiagnostics.set(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    @Override
    public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
        return this.mostRecentlyCreatedDiagnostics.get();
    }

    /**
     * Merges captured diagnostics into the diagnostics context snapshot carried by the given
     * request options (when present).
     * @param requestOptions the options holding the context snapshot; may be null
     */
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            // Read the snapshot once and reuse it - the original re-invoked the getter for the
            // assignment, which was redundant and could observe a different snapshot.
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                knownCtx = ctxSnapshot;
            }
        }
        merge(knownCtx);
    }

    /**
     * Merges all captured, not-yet-attached diagnostics into the given context - or, when none
     * is supplied, into the first context found among the captured diagnostics. No-op after the
     * first successful invocation.
     * @param knownCtx the target diagnostics context; may be null
     */
    public void merge(CosmosDiagnosticsContext knownCtx) {
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // Fall back to the first captured diagnostics that already carries a context.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            // Only attach diagnostics that have content and are not already bound to a context.
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so the paged flux does not drop them.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    // Clears captured diagnostics and re-arms the merge guard for reuse of this factory.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} |
Fixed | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel,
configs.getProtocol());
}).subscribe();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | configs.getProtocol()); | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init().thenEmpty((publisher) -> {
logger.info(
"Initialized DocumentClient [{}] with machineId[{}]"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
clientId,
ClientTelemetry.getMachineId(diagnosticsClientConfig),
serviceEndpoint,
connectionPolicy,
consistencyLevel);
}).subscribe();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Returns the diagnostics client configuration assembled during construction.
 */
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
/**
 * Creates a new CosmosDiagnostics instance (sampled per the client telemetry config)
 * and remembers it as the most recently created diagnostics for this client.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    final CosmosDiagnostics newDiagnostics = diagnosticsAccessor.create(
        this,
        telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    this.mostRecentlyCreatedDiagnostics.set(newDiagnostics);
    return newDiagnostics;
}
/**
 * Initializes the gateway service configuration reader and derives whether multi-region
 * writes can be used (requires both the connection-policy opt-in and the account capability).
 * Fails fast with a RuntimeException when the initial database account refresh never succeeded
 * (unreachable endpoint or invalid auth token).
 * NOTE(review): the dangling "https:" fragments below appear to be truncated documentation URLs
 * from source extraction — confirm against the upstream file.
 */
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
// A null account means no metadata refresh has completed successfully yet.
if (databaseAccount == null) {
Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
if (databaseRefreshErrorSnapshot != null) {
// Include the captured refresh error as the failure cause when one exists.
logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot
);
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot);
} else {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
}
}
// Multi-write is only usable when both the client policy and the account allow it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the lazily-initialized caches and configuration into the gateway proxy.
 * Called after the collection/partition-key-range caches and the gateway configuration
 * reader have been created, since the proxy is constructed before they exist.
 */
private void updateGatewayProxy() {
(this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
(this.gatewayProxy).setCollectionCache(this.collectionCache);
(this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
(this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Serializes this client's collection cache into the given metadata-caches snapshot,
 * allowing a future client to be bootstrapped without re-fetching collection metadata.
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Wires up the direct (TCP) connectivity stack: the global address resolver,
 * the store client factory, and the server store model.
 * Only invoked when the client operates in direct connection mode.
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
// no service config reader is supplied here; it is set later via the configuration flow
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
// Build the ServerStoreModel on top of the freshly created factory.
this.createStoreModel(true);
}
/**
 * Adapts this client to the DatabaseAccountManagerInternal interface so that the
 * GlobalEndpointManager can query the service endpoint, account metadata, and
 * connection policy without depending on the full client type.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Factory for the gateway store model. Package-private and overridable in tests
 * (presumably — TODO confirm) so a fake store model can be injected.
 *
 * @param sessionContainer       session token store for session consistency
 * @param consistencyLevel       client-level consistency
 * @param queryCompatibilityMode query wire-format compatibility setting
 * @param userAgentContainer     user agent sent on gateway requests
 * @param globalEndpointManager  endpoint selection/failover manager
 * @param httpClient             reactor HTTP client used for gateway calls
 * @param apiType                API surface (SQL/Table/...) if set
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy settings.
 * When connection sharing across clients is enabled, a shared singleton instance is
 * reused; otherwise a dedicated fixed client is created and its configuration is
 * recorded in the diagnostics client config.
 */
private HttpClient httpClient() {
    final HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its configuration for diagnostics before creating it.
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }

    return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a ServerStoreModel.
 * NOTE(review): the subscribeRntbdStatus parameter is not referenced in this body —
 * confirm whether it is still needed or can be removed at the call sites.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
/**
 * Returns the service endpoint URI this client was created with.
 */
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
/**
 * Returns the effective connection policy (direct or gateway) of this client.
 */
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
/**
 * Whether write operations return the full resource payload by default
 * (can be overridden per request via RequestOptions).
 */
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
/**
 * Returns the client-level consistency level, or null when the account default is used.
 */
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
/**
 * Returns the client telemetry collector associated with this client.
 */
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
/**
 * Returns the correlation id used to tie diagnostics/telemetry back to this client instance.
 */
@Override
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
/**
 * Returns the machine id derived from the diagnostics client config,
 * or null when no diagnostics config is present.
 */
@Override
public String getMachineId() {
    final DiagnosticsClientConfig configSnapshot = this.diagnosticsClientConfig;
    return configSnapshot == null ? null : ClientTelemetry.getMachineId(configSnapshot);
}
/**
 * Returns the full user-agent string (including any configured suffix).
 */
@Override
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
/**
 * Returns the diagnostics instance most recently produced by {@link #createDiagnostics()},
 * or null when none has been created yet.
 */
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return mostRecentlyCreatedDiagnostics.get();
}
/**
 * Creates a database, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Create-Database request.
 * Serialization time is measured explicitly and attached to the request's
 * serialization diagnostics context. Synchronous failures are surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when database is null
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the payload serialization so it can be reported in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Attach the measured serialization diagnostics to the request's diagnostics context.
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete-Database request.
 * Synchronous failures (e.g. an empty link) are surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when databaseLink is empty
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);

        final String resourcePath = Utils.joinPath(databaseLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read-Database request.
 * Synchronous failures are surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when databaseLink is empty
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Enumerates all databases of the account as a paged feed.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a target resource type to the feed/query link
 * of that resource type (e.g. a collection link + Document -> ".../docs").
 * Database and Offer queries are rooted at fixed paths and ignore the parent link.
 *
 * @throws IllegalArgumentException for resource types that cannot be queried this way
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case ClientEncryptionKey:
return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
/**
 * Extracts the operation-context/listener tuple attached to the given query options,
 * or null when no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/**
 * Extracts the operation-context/listener tuple attached to the given request options,
 * or null when no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload that runs the query with this client as the diagnostics factory.
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Sets up and executes a cross-partition query:
 * resolves the feed link, picks a correlation activity id (caller-provided or random),
 * wraps execution in an InvalidPartitionException retry policy, and scopes all
 * diagnostics through a ScopedDiagnosticsFactory that is merged back into the
 * operation state on completion, error, and cancellation.
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Honor a caller-supplied correlation activity id; otherwise generate one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
// Set when the query is cancelled by the end-to-end timeout policy.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
// Scope diagnostics so they can be merged into the operation state exactly once per outcome.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
// Merge diagnostics on each emitted page.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
// Merge diagnostics before propagating the failure unchanged.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Creates the document query execution context and maps its pages into FeedResponse:
 * query-plan info is attached to the first page only, select-value query info to every
 * page, and when an end-to-end latency policy is enabled the whole flux is wrapped
 * with a timeout that flags {@code isQueryCancelledOnTimeout}.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
// Tracks whether query-plan diagnostics have been attached yet (first page only).
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Attach query-plan diagnostics exactly once, on the first page.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsClientContext);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Attaches the best-available diagnostics to a query exception.
 * Prefers the most recently created diagnostics of the client context; otherwise
 * merges the client-side request statistics of all diagnostics tracked for
 * cancelled requests into a single aggregate and attaches that.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
        return;
    }

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics == null || cancelledRequestDiagnostics.isEmpty()) {
        return;
    }

    // Fold all tracked diagnostics into one by merging their client-side request statistics.
    CosmosDiagnostics aggregatedCosmosDiagnostics =
        cancelledRequestDiagnostics
            .stream()
            .reduce((first, toBeMerged) -> {
                ClientSideRequestStatistics clientSideRequestStatistics =
                    ImplementationBridgeHelpers
                        .CosmosDiagnosticsHelper
                        .getCosmosDiagnosticsAccessor()
                        .getClientSideRequestStatisticsRaw(first);

                // BUGFIX: this previously read from 'first' as well, so the statistics
                // of 'toBeMerged' were silently dropped from the aggregate.
                ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                    ImplementationBridgeHelpers
                        .CosmosDiagnosticsHelper
                        .getCosmosDiagnosticsAccessor()
                        .getClientSideRequestStatisticsRaw(toBeMerged);

                if (clientSideRequestStatistics == null) {
                    return toBeMerged;
                } else {
                    clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                    return first;
                }
            })
            // Safe: the list was checked non-empty above, so reduce yields a value.
            .get();

    BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
}
/**
 * Wraps a feed-response flux with the end-to-end operation timeout.
 * On timeout the reactive TimeoutException is replaced with a CosmosException
 * (a dedicated "negative timeout" exception when the configured timeout is negative,
 * OperationCancelledException otherwise), the cancellation flag is set, and merged
 * diagnostics are attached to the exception.
 * NOTE(review): Flux.timeout is invoked with the negative duration before the mapping
 * kicks in — presumably it fires immediately in that case; confirm against Reactor docs.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout,
DiagnosticsClientContext diagnosticsClientContext) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
// Preserve the original stack for debuggability.
cancellationException.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(
requestOptions, cancellationException, diagnosticsClientContext);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
// Preserve the original stack for debuggability.
exception.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
return exception;
}
return throwable;
});
}
/**
 * Queries databases with a raw query string (delegates to the SqlQuerySpec overload).
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
return queryDatabases(new SqlQuerySpec(query), state);
}
/**
 * Queries databases at the account root with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a collection, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Create-Collection request.
 * Serialization time is measured and attached to the request diagnostics, and the
 * session token from the response is captured so subsequent session reads see the
 * new collection. Synchronous failures are surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when databaseLink is empty or collection is null
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Time the payload serialization so it can be reported in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Capture the session token for the newly created collection.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a collection, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Replace-Collection request against the collection's self link.
 * Serialization time is measured and attached to the request diagnostics; the session
 * token is captured when the response carries a resource. Synchronous failures are
 * surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when collection is null
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Time the payload serialization so it can be reported in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Unlike create, replace may return no resource body; guard before capturing the session token.
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a collection, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete-Collection request.
 * Synchronous failures (e.g. an empty link) are surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when collectionLink is empty
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);

        final String resourcePath = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Populates DELETE headers and dispatches the request through the resolved store proxy.
 * When the retry policy has already retried, its retry-context end time is refreshed
 * before the message is processed.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Dispatches a delete-all-items-by-partition-key request (sent as POST) through the
 * resolved store proxy, refreshing the retry-context end time when retries occurred.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
/**
 * Populates GET headers and dispatches the read through the resolved store proxy,
 * refreshing the retry-context end time when retries occurred.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
return populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated);
});
}
/**
 * Populates GET headers and dispatches a feed read through the resolved store proxy.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest ->
            this.getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
/**
 * Populates POST headers, dispatches the query through the resolved store proxy,
 * and captures the session token from the response before returning it.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated ->
this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
.map(response -> {
// Record the session token so session-consistency reads observe this query's progress.
this.captureSessionToken(requestPopulated, response);
return response;
}
));
}
/**
 * Reads a collection, wrapping the internal implementation with the client's retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read-Collection request.
 * Synchronous failures are surfaced as Mono.error.
 *
 * @throws IllegalArgumentException (via Mono.error) when collectionLink is empty
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Enumerates all collections of the given database as a paged feed.
 *
 * @throws IllegalArgumentException when databaseLink is empty (thrown synchronously)
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
/**
 * Queries collections of a database with a raw query string.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
QueryFeedOperationState state) {
return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections of a database with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string.
 * JsonSerializable values use the model bridge serializer; everything else goes
 * through the shared Jackson mapper.
 *
 * @throws IllegalArgumentException when a value cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            json.append(',');
        }
        final Object object = objectArray.get(i);
        if (object instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                json.append(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append(']').toString();
}
/**
 * Validates a resource id: rejects ids containing path-breaking characters
 * and ids ending with a space. An empty/unset id is allowed (server-assigned).
 * NOTE(review): the character check on the second condition line is truncated in this
 * extraction (dangling char literal) — confirm the full character list upstream.
 *
 * @throws IllegalArgumentException when the id contains illegal chars or ends with a space
 */
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the per-request HTTP header map from client-level defaults and per-request options.
 * Client defaults (tentative writes, consistency level) are applied first; request options
 * then layer on top and may override them (e.g. per-request consistency level).
 *
 * @param options       per-request options; may be null, in which case only client defaults apply
 * @param resourceType  resource the request targets (drives the PREFER return-minimal header)
 * @param operationType operation kind (only write operations on documents get PREFER return-minimal)
 * @return a mutable header map for the outgoing request
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the content-response-on-write client default can still apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Caller-supplied custom headers go in first so the well-known options below win on conflict.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response-on-write overrides the client-level default when set.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Throughput headers: explicit offer throughput wins over offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
/**
 * Returns the retry-policy factory that handles session-token resets for requests.
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection for the request, then stamps the partition key
 * (header + internal PK) onto the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    // The collection's partition key definition is required before the PK can be extracted.
    Mono<Utils.ValueHolder<DocumentCollection>> resolvedCollection =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return resolvedCollection.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Stamps partition key information onto the request once the supplied collection
 * resolution completes.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        DocumentCollection resolved = holder.v;
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolved);
        return request;
    });
}
/**
 * Determines the effective partition key for the operation and sets it on the request,
 * both as the typed internal PK and as the x-ms-documentdb-partitionkey header.
 *
 * Resolution order (first match wins):
 *  1. options PK == PartitionKey.NONE   -> "none" PK derived from the collection definition
 *  2. options PK supplied               -> use it directly
 *  3. collection has no PK definition   -> empty PK
 *  4. document body available           -> extract PK value from the document
 *  otherwise the operation cannot proceed without a PK.
 *
 * @throws UnsupportedOperationException when no partition key can be determined
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // rewind: the buffer may already have been read during serialization
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the PK extraction so it shows up in serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the service request for a document create/upsert: serializes the body,
 * assembles headers, wires E2E-timeout and excluded-regions hooks, and resolves
 * the partition key asynchronously.
 *
 * @param requestRetryPolicy   retry policy notified before the request is sent; may be null
 * @param documentCollectionLink target collection self-link; must be non-empty
 * @param document             document payload; must be non-null
 * @param disableAutomaticIdGeneration currently unused here; id handling happens during serialization
 * @param operationType        Create or Upsert
 * @return Mono producing the fully prepared request (PK resolution is async)
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType,
                                                                DiagnosticsClientContext clientContextOverride) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Measure body serialization so it can be reported in diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        // Allow the E2E-timeout machinery to mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized
 * batch body, assembles headers, wires E2E-timeout/excluded-regions hooks, and
 * attaches batch routing headers once the collection is resolved.
 *
 * Fix: the original applied {@code setExcludeRegions} twice (once inside the
 * options block that also sets the E2E-timeout hook, and again in a second
 * {@code if (options != null)} block); the redundant second application is removed.
 *
 * @param requestRetryPolicy retry policy notified before send; may be null
 * @param disableAutomaticIdGeneration unused here; kept for signature compatibility
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // Measure body wrapping so it can be reported in serialization diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Allow the E2E-timeout machinery to mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches batch routing and batch control headers to the request.
 * Single-partition-key batches route by PK (resolving PartitionKey.NONE against the
 * collection definition); partition-key-range batches route by PK range identity.
 *
 * @throws UnsupportedOperationException for unknown ServerBatchRequest subtypes
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // NONE needs the collection's PK definition to produce the "none" value.
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * Populates authentication, content-type, and feed-range headers on the request.
 *
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the
 * request to populate headers.
 *
 * @param request    request to populate headers on
 * @param httpMethod HTTP verb used for signing and content-type selection
 * @return Mono which, on subscription, populates the headers in the passed-in request
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // x-ms-date must be set before signing — the signature covers it.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is URL-encoded as required by the service's authorization header format.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is guaranteed by the JLS, so this is effectively unreachable.
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }
    this.populateCapabilitiesHeader(request);
    // Default content-type: JSON for POST/PUT, JSON-patch for PATCH — unless caller already set one.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range filtered reads need extra routing headers resolved asynchronously
        // before the (possibly AAD) authorization header is applied.
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}
/**
 * Advertises the SDK's supported-capabilities header unless the caller has
 * already set it explicitly.
 */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Returns true when this request is a document/conflict feed or query that
 * carries a feed range, i.e. when feed-range filtering headers must be added.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeCapableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeCapableResource) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean feedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return feedOrQuery && request.getFeedRange() != null;
}
/**
 * Applies the AAD authorization header to the request when AAD auth is in use;
 * otherwise passes the request through unchanged.
 *
 * @throws IllegalArgumentException when request is null
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Non-AAD auth was already applied earlier in the pipeline.
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Applies the AAD authorization header to a raw header set when AAD auth is in use;
 * otherwise passes the headers through unchanged.
 *
 * @throws IllegalArgumentException when httpHeaders is null
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * Returns the authorization token type this client was configured with
 * (e.g. primary master key or AAD token).
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Produces the authorization token for a request according to the configured auth
 * mechanism, checked in precedence order:
 *  1. custom token resolver, 2. credential-based key signature, 3. single resource
 *  token, 4. resource-tokens map (with a special case for DatabaseAccount reads).
 *
 * @param tokenType requested token type (currently not consulted by the branches below)
 * @return the raw (not yet URL-encoded) authorization token
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim for every request.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account reads are authorized with the first token from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
/**
 * Maps an internal ResourceType to its public CosmosResourceType, falling back
 * to SYSTEM for types with no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
/**
 * Records the session token from a service response into the session container
 * so subsequent session-consistent reads can use it.
 */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Executes a create (POST) against the store after populating headers.
 * Updates the retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Executes an upsert (POST with the IS_UPSERT header) against the store after
 * populating headers, and captures the response's session token.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            Map<String, String> headers = requestPopulated.getHeaders();
            // populateHeadersAsync always yields a request with a header map.
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
                );
        });
}
/**
 * Executes a replace (PUT) against the store after populating headers.
 * Updates the retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Executes a partial-document update (PATCH) against the store after populating headers.
 * Updates the retry context end time when this call is itself a retry.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Public entry point for creating a document. Wraps the core create with the
 * availability-strategy machinery (region hedging); non-idempotent write retries
 * are only threaded through when explicitly enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core create path: sets up scoped diagnostics, retry policies (adding
 * PK-mismatch retry when no partition key was supplied), and the end-to-end
 * operation timeout around the actual create.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without a caller-supplied PK the server may reject with a PK-mismatch; retry handles that.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and sends the create request, mapping the raw service response to a
 * typed ResourceResponse. Synchronous failures during request construction are
 * converted into an error Mono so callers get a uniform reactive error channel.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a point-operation Mono with the end-to-end operation timeout when one is
 * enabled. Negative timeouts fail fast with a cancellation exception; timeouts at
 * runtime are mapped to OperationCancelledException with diagnostics attached.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {
    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Ensure there is at least one diagnostics instance to attach to the error.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }
        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    return rxDocumentServiceResponseMono;
}
/**
 * Maps a reactor TimeoutException (from the E2E timeout operator) into an
 * OperationCancelledException with the original stack trace and the most recent
 * diagnostics attached; also fires the mark-E2E-timeout callback so the in-flight
 * request is flagged as cancelled. Non-timeout errors pass through unchanged.
 */
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
    // Reactor may wrap the TimeoutException; unwrap before inspecting.
    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if (unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }
        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }
    return throwable;
}
/**
 * Builds the cancellation exception returned when a caller configures a negative
 * end-to-end timeout, tagging it with the NEGATIVE_TIMEOUT_PROVIDED sub-status
 * and any available diagnostics.
 *
 * @throws IllegalArgumentException when the supplied timeout is not negative
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException exception = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }
    return exception;
}
/**
 * Public entry point for upserting a document. Wraps the core upsert with the
 * availability-strategy machinery, mirroring {@code createDocument}.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core upsert path: sets up scoped diagnostics, retry policies (adding
 * PK-mismatch retry when no partition key was supplied), and the end-to-end
 * operation timeout around the actual upsert.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without a caller-supplied PK the server may reject with a PK-mismatch; retry handles that.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and sends the upsert request, mapping the raw service response to a
 * typed ResourceResponse. Synchronous failures during request construction are
 * converted into an error Mono.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs =
            getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride);
        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Public entry point for replacing a document. Wraps the core replace with the
 * availability-strategy machinery, mirroring {@code createDocument}.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Core replace path: normalizes the request options, scopes diagnostics, wires
 * the session-token-reset retry policy (augmented with partition-key-mismatch
 * retries when no PK was supplied) and applies the end-to-end timeout.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions effectiveOptions = (options == null) ? new RequestOptions() : options;
    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(diagnosticsFactory);
    if (effectiveOptions.getPartitionKey() == null) {
        // No partition key supplied - wrap the policy so a PK mismatch can be
        // retried after the collection cache is refreshed.
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, effectiveOptions);
    }
    DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        effectiveOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                effectiveOptions,
                effectiveRetryPolicy,
                endToEndPolicyConfig,
                diagnosticsFactory),
            effectiveRetryPolicy),
        diagnosticsFactory
    );
}
/**
 * Validates the replace arguments, converts the arbitrary payload into a typed
 * {@link Document} and delegates to the link-based replace overload. Validation
 * failures are surfaced as a {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        // Guard clauses: both the link and the payload are mandatory.
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception error) {
        logger.debug("Failure in replacing a document due to [{}]", error.getMessage());
        return Mono.error(error);
    }
}
/**
 * Replaces a document using the instance's own self link, routed through the
 * availability-strategy wrapper.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) ->
            replaceDocumentCore(document, effectiveOptions, e2eConfig, clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core replace path for a typed {@link Document}. When no partition key is
 * available on the options, the retry policy is wrapped so a partition-key
 * mismatch can be retried after a collection-cache refresh.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    boolean partitionKeyMissing = options == null || options.getPartitionKey() == null;
    if (partitionKeyMissing) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, document.getSelfLink(), options);
    }
    DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the document and delegates the replace via the document's self
 * link. Synchronous failures are surfaced as a {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);
    } catch (Exception e) {
        // FIX: the previous message said "replacing a database" although this
        // path replaces a document (copy/paste from the database overload).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Lowest-level replace path: serializes the document payload, builds the
 * Replace {@link RxDocumentServiceRequest} (recording serialization timing into
 * the request diagnostics), resolves the collection to attach partition key
 * information, and executes the request.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Serialization is timed so it can be reported in the request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    if (options != null) {
        String trackingId = options.getTrackingId();
        if (trackingId != null && !trackingId.isEmpty()) {
            // Stamp the tracking id into the payload itself - presumably used to
            // correlate retried non-idempotent writes; TODO confirm with callers.
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook so the end-to-end timeout machinery can flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);
    // NOTE(review): the outer 'request' (not the lambda's 'req') is passed to
    // replace() - presumably addPartitionKeyInformation mutates and returns the
    // same instance; confirm before changing.
    return requestObs
        .flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}
/**
 * Resolves the end-to-end latency policy for a request, preferring a
 * per-request policy supplied on the options.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Returns the supplied policy when present, otherwise falls back to the
 * client-wide default end-to-end latency policy.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies a set of patch operations to a document, routed through the
 * availability-strategy wrapper; write retries are only treated as safe when
 * explicitly enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, e2eConfig, clientContextOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core patch path: normalizes the request options, scopes diagnostics, and
 * executes the patch with session-token-reset retries and the end-to-end
 * timeout applied.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions effectiveOptions = (options == null) ? new RequestOptions() : options;
    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(diagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        effectiveOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                effectiveOptions,
                retryPolicy,
                diagnosticsFactory),
            retryPolicy),
        diagnosticsFactory
    );
}
/**
 * Lowest-level patch path: serializes the patch operations (timing the
 * serialization for diagnostics), builds the Patch request, resolves the
 * collection to attach partition key information, and executes the request.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    // Serialization is timed so it can be reported in the request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook so the end-to-end timeout machinery can flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    // No document payload is passed here; partition key information comes from the
    // options / resolved collection only.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    // NOTE(review): the outer 'request' (not the lambda's 'req') is passed to
    // patch() - presumably addPartitionKeyInformation mutates and returns the
    // same instance; confirm before changing.
    return requestObs
        .flatMap(req -> patch(request, retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document identified by its link (no payload snapshot), routed
 * through the availability-strategy wrapper.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            null,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Deletes a document identified by its link, passing along a payload snapshot
 * that the core path forwards to partition-key resolution.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled
    );
}
/**
 * Core delete path: normalizes the request options, scopes diagnostics, and
 * executes the delete with session-token-reset retries and the end-to-end
 * timeout applied.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions effectiveOptions = (options == null) ? new RequestOptions() : options;
    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(diagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        effectiveOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                effectiveOptions,
                retryPolicy,
                diagnosticsFactory),
            retryPolicy),
        diagnosticsFactory
    );
}
/**
 * Lowest-level delete path: builds the Delete request, resolves the collection
 * to attach partition key information (using the optional payload snapshot),
 * and executes the request.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Hook so the end-to-end timeout machinery can flag this request as cancelled.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
        // internalObjectNode (may be null) is only consumed by partition-key resolution.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);
        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes every document in one logical partition of the given collection.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    // NOTE(review): the partitionKey parameter is not forwarded here; the target
    // partition is presumably carried by the request options - confirm with callers.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues a Delete against the {@code PartitionKey} resource type for the given
 * collection - the server-side bulk deletion of a logical partition. The target
 * partition key is presumably taken from the request options; TODO confirm.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a single document by link, using this client instance as the
 * diagnostics factory for the internal overload.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}
/**
 * Internal read overload taking an explicit diagnostics factory (used by
 * readMany point reads). Reads never enable non-idempotent write retries.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eConfig, clientContextOverride) ->
            readDocumentCore(documentLink, effectiveOptions, e2eConfig, clientContextOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Core read path: normalizes the request options, scopes diagnostics, and
 * executes the read with session-token-reset retries and the end-to-end
 * timeout applied.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    RequestOptions effectiveOptions = (options == null) ? new RequestOptions() : options;
    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(diagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        effectiveOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(
                documentLink,
                effectiveOptions,
                retryPolicy,
                diagnosticsFactory),
            retryPolicy),
        diagnosticsFactory
    );
}
/**
 * Lowest-level read path: builds the Read request, resolves the collection to
 * attach partition key information, and executes the request.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // FIX: guard against null options before dereferencing, consistent with
        // the delete/replace/patch internals. Previously this would NPE when
        // invoked with null options even though other request builders above
        // tolerate null.
        if (options != null) {
            // Hook so the end-to-end timeout machinery can flag this request as cancelled.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req ->
            this.read(request, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads the full document feed of a collection, implemented as an unfiltered
 * query over the collection.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String readAllQuery = "SELECT * FROM r";
    return queryDocuments(collectionLink, readAllQuery, state, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs. The requested
 * identities are bucketed by owning partition key range; ranges with a single
 * item are served via point reads, ranges with several items via one query per
 * range, and all pages are merged into a single synthetic {@link FeedResponse}
 * with aggregated request charge, query metrics and diagnostics.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }
            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    // Bucket every requested identity into the partition key range that owns it.
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // Sub-partitioned (MULTI_HASH) containers require a fully
                            // specified partition key - reject partial keys up front.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    // Ranges with >1 item get a query spec; single-item ranges are
                    // handled by point reads below (see getRangeQueryMap).
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);
                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            // Merge all pages: results, request charge, query metrics
                            // and client-side request statistics.
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }
                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);
                            // Record the successful composite operation into the
                            // operation-level diagnostics context.
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                .onErrorMap(throwable -> {
                    // On failure still record the operation into the diagnostics
                    // context, then surface the original CosmosException unchanged.
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }
                        return cosmosException;
                    }
                    return throwable;
                });
        }
        );
}
/**
 * For every partition key range that owns more than one requested identity,
 * builds the query spec used to fetch those items in a single round trip.
 * Ranges holding exactly one identity are omitted - they are served by point
 * reads instead.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    boolean partitionKeyIsId = partitionKeySelector.equals("[\"id\"]");
    boolean isMultiHash = partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH);
    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges are point reads, not queries
        }
        SqlQuerySpec querySpec;
        if (partitionKeyIsId) {
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
        } else if (isMultiHash) {
            querySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(identities, partitionKeySelector);
        }
        rangeQueryMap.put(entry.getKey(), querySpec);
    }
    return rangeQueryMap;
}
/**
 * Builds the readMany query for containers whose partition key path is /id:
 * matching on id alone is sufficient, producing
 * {@code SELECT * FROM c WHERE c.id IN ( @param0, @param1, ... )}.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    int itemCount = idPartitionKeyPairList.size();
    for (int i = 0; i < itemCount; i++) {
        if (i > 0) {
            query.append(", ");
        }
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idPartitionKeyPairList.get(i).getId()));
        query.append(idParamName);
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for single-path (non-id, non-multi-hash) partition
 * keys. Each identity contributes one OR-ed disjunct of the form
 * {@code (c.id = @param{2i+1} AND c[pk] = @param{2i})}.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        if (i > 0) {
            query.append(" OR ");
        }
        CosmosItemIdentity identity = itemIdentities.get(i);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        // Parameter numbering interleaves pk (even) and id (odd) slots.
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for sub-partitioned (MULTI_HASH) containers. Each
 * identity contributes one OR-ed disjunct that matches the id plus every
 * partition key component against its own path, e.g.
 * {@code (c.id = @paramN AND c.tenant = @param0 AND c.user = @param1)}.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // NOTE(review): assumes the multi-hash key renders as a single String
        // whose components are joined by '=' - confirm against
        // ModelBridgeInternal.getPartitionKeyObject for MULTI_HASH keys; values
        // containing '=' would split incorrectly.
        String pkValueString = (String) pkValue;
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        for (String subPartitionKey: pkValueString.split("=")) {
            // One parameter per partition key component, paired with its path.
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }
        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            queryStringBuilder.append(pkParam.get(0).substring(1)); // drop the leading '/'
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }
        queryStringBuilder.append(" )");
        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Renders the partition key path(s) as a bracketed property selector, e.g. a
 * single path "/pk" becomes {@code ["pk"]}; multiple paths are concatenated.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
        // NOTE(review): this replaces an embedded '"' with a bare backslash
        // rather than an escaped quote ('\"') - presumably intentional to match
        // the backend's quoting rules, but confirm.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Executes the per-partition-range query specs produced for readMany. An empty
 * map means every range was served by point reads, so nothing is queried. When
 * an end-to-end latency policy is enabled, the resulting feed is wrapped with
 * the corresponding timeout.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }
    UUID activityId = randomUuid();
    // Shared flag the timeout wrapper uses to signal cancellation into the query.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);
    Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
    RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .toRequestOptions(options);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions);
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }
    return feedResponseFlux;
}
    /**
     * Executes the point-read leg of a readMany operation: partitions that map to exactly one
     * item identity are served via a direct document read instead of a query. Each read result
     * (or a 404/0 "not found") is converted into a single-page feed response so it can be merged
     * with the query leg's responses.
     *
     * @param diagnosticsFactory            scoped factory accumulating diagnostics
     * @param singleItemPartitionRequestMap item identities per partition-key range; only
     *                                      single-entry lists are point-read (others yield empty)
     * @param resourceLink                  document link prefix; item id is appended
     * @param queryRequestOptions           caller's query options, converted per read
     * @param klass                         item deserialization target type
     * @return feed responses, one per point read (empty page for a plain 404)
     */
    private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
        String resourceLink,
        CosmosQueryRequestOptions queryRequestOptions,
        Class<T> klass) {
        return Flux.fromIterable(singleItemPartitionRequestMap.values())
            .flatMap(cosmosItemIdentityList -> {
                if (cosmosItemIdentityList.size() == 1) {
                    CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                    RequestOptions requestOptions = ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .toRequestOptions(queryRequestOptions);
                    requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                    return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                        .flatMap(resourceResponse -> Mono.just(
                            new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                        ))
                        .onErrorResume(throwable -> {
                            Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                            if (unwrappedThrowable instanceof CosmosException) {
                                CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                                int statusCode = cosmosException.getStatusCode();
                                int subStatusCode = cosmosException.getSubStatusCode();
                                // A plain 404 (sub-status UNKNOWN) means the item does not exist;
                                // carry the exception forward so an empty page can be emitted below.
                                if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                    return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                                }
                            }
                            // Any other failure aborts the whole readMany pipeline.
                            return Mono.error(unwrappedThrowable);
                        });
                }
                // Multi-item buckets are handled by the query leg, not here.
                return Mono.empty();
            })
            .flatMap(resourceResponseToExceptionPair -> {
                ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
                CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
                FeedResponse<Document> feedResponse;
                if (cosmosException != null) {
                    // Item missing: emit an empty page but keep the 404's headers and diagnostics.
                    feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
                } else {
                    // Successful read: wrap the single item into a one-element feed page.
                    CosmosItemResponse<T> cosmosItemResponse =
                        ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                    feedResponse = ModelBridgeInternal.createFeedResponse(
                        Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                        cosmosItemResponse.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
                }
                return Mono.just(feedResponse);
            });
    }
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}
    /**
     * Creates an {@link IDocumentQueryClient} adapter that exposes this client's caches,
     * consistency settings and transport entry points to the query execution pipeline.
     *
     * @param rxDocumentClientImpl            the owning client (the adapter actually captures the
     *                                        enclosing instance via {@code RxDocumentClientImpl.this})
     * @param operationContextAndListenerTuple optional operation context/listener; when present,
     *                                        query requests are reported to the listener
     * @return the adapter instance
     */
    private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return new IDocumentQueryClient () {
            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }
            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }
            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }
            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                // Account-level default consistency, as reported by the gateway.
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }
            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                // Client-configured consistency override (may be null).
                return RxDocumentClientImpl.this.consistencyLevel;
            }
            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                if (operationContextAndListenerTuple == null) {
                    return RxDocumentClientImpl.this.query(request).single();
                } else {
                    // Notify the registered listener around the request lifecycle and tag the
                    // request with the correlated activity id for tracing.
                    final OperationListener listener =
                        operationContextAndListenerTuple.getOperationListener();
                    final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                    request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                    listener.requestListener(operationContext, request);
                    return RxDocumentClientImpl.this.query(request).single().doOnNext(
                        response -> listener.responseListener(operationContext, response)
                    ).doOnError(
                        ex -> listener.exceptionListener(operationContext, ex)
                    );
                }
            }
            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }
            @Override
            public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
                ResourceType resourceType,
                OperationType operationType,
                Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
                RxDocumentServiceRequest req,
                BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
                // Delegate straight to the owning client's availability-strategy implementation.
                return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                    resourceType,
                    operationType,
                    retryPolicyFactory,
                    req,
                    feedOperation
                );
            }
            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                // NOTE(review): intentionally returns null (not implemented for this adapter) —
                // confirm the query pipeline never calls readFeedAsync on this path.
                return null;
            }
        };
    }
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state,
Class<T> classOfT) {
SqlQuerySpecLogger.getInstance().logQuery(querySpec);
return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
final DocumentCollection collection,
final CosmosChangeFeedRequestOptions changeFeedOptions,
Class<T> classOfT) {
checkNotNull(collection, "Argument 'collection' must not be null.");
ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
this,
ResourceType.Document,
classOfT,
collection.getAltLink(),
collection.getResourceId(),
changeFeedOptions);
return changeFeedQueryImpl.executeAsync();
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}
    /**
     * Reads all documents of a single logical partition by issuing a partition-scoped scan
     * query against the physical partition that owns the partition key.
     *
     * @param collectionLink link of the collection; must be non-empty
     * @param partitionKey   logical partition to scan; must be non-null
     * @param state          feed operation state carrying query options and diagnostics hooks
     * @param classOfT       item deserialization target type
     * @return feed responses containing every document of the logical partition
     * @throws IllegalArgumentException if {@code collectionLink} is empty or {@code partitionKey} is null
     */
    @Override
    public <T> Flux<FeedResponse<T>> readAllDocuments(
        String collectionLink,
        PartitionKey partitionKey,
        QueryFeedOperationState state,
        Class<T> classOfT) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (partitionKey == null) {
            throw new IllegalArgumentException("partitionKey");
        }
        // Clone the options so the caller's instance is not mutated when the
        // partition key range id is set further below.
        final CosmosQueryRequestOptions effectiveOptions =
            qryOptAccessor.clone(state.getQueryOptions());
        RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            ResourceType.Document,
            OperationType.Query,
            false,
            nonNullRequestOptions);
        ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
        // Fewer than two applicable regions means no speculative execution: merge diagnostics
        // once. Otherwise reset the factory for each speculative attempt.
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            state.registerDiagnosticsFactory(
                () -> {},
                (ctx) -> diagnosticsFactory.merge(ctx));
        } else {
            state.registerDiagnosticsFactory(
                () -> diagnosticsFactory.reset(),
                (ctx) -> diagnosticsFactory.merge(ctx));
        }
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink,
            null
        );
        // Resolve collection metadata first; the partition key definition is needed to build
        // the scan query and to locate the owning partition key range.
        Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request).flux();
        return collectionObs.flatMap(documentCollectionResourceResponse -> {
            DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }
            PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            String pkSelector = createPkSelector(pkDefinition);
            SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
            String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
            UUID activityId = randomUuid();
            final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
            IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
            // Retries the lookup/query when the partition has been split or the collection recreated.
            InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
                this.collectionCache,
                null,
                resourceLink,
                ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
            Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
                () -> {
                    Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                        .tryLookupAsync(
                            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                            collection.getResourceId(),
                            null,
                            null).flux();
                    return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                        CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }
                        // Map the logical partition key to the physical range that owns it,
                        // then scope the scan query to that single range.
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                BridgeInternal.getPartitionKeyInternal(partitionKey),
                                pkDefinition);
                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        return createQueryInternal(
                            diagnosticsFactory,
                            resourceLink,
                            querySpec,
                            ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                            classOfT,
                            ResourceType.Document,
                            queryClient,
                            activityId,
                            isQueryCancelledOnTimeout);
                    });
                },
                invalidPartitionExceptionRetryPolicy);
            if (orderedApplicableRegionsForSpeculation.size() < 2) {
                return innerFlux;
            }
            // Speculation path: merge diagnostics on every terminal signal (value, error, cancel).
            return innerFlux
                .flatMap(result -> {
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return Mono.just(result);
                })
                .onErrorMap(throwable -> {
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return throwable;
                })
                .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
        });
    }
    /**
     * Returns the client's shared query-plan cache (query text to partitioned query execution info).
     */
    @Override
    public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
        return queryPlanCache;
    }
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
validateResource(storedProcedure);
String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
return RxDocumentServiceRequest.create(this, operationType,
ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (udf == null) {
throw new IllegalArgumentException("udf");
}
validateResource(udf);
String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
collectionLink, storedProcedure.getId());
RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
OperationType.Create);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (storedProcedure == null) {
throw new IllegalArgumentException("storedProcedure");
}
logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
RxDocumentClientImpl.validateResource(storedProcedure);
String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
QueryFeedOperationState state) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, documentClientRetryPolicy), documentClientRetryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
}
    /**
     * Core stored-procedure execution: serializes the parameters into the request body,
     * resolves partition key information, sends the ExecuteJavaScript request, captures the
     * returned session token, and converts the transport response.
     *
     * @param storedProcedureLink link of the stored procedure to execute
     * @param options             request options (partition key, excluded regions, ...); may be null
     * @param procedureParams     parameters passed to the script; empty body when null/empty
     * @param retryPolicy         retry policy whose pre-send hook is applied; may be null
     */
    private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                         RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
        try {
            logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
            // Script execution responses are JSON regardless of the client's default accept type.
            requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ExecuteJavaScript,
                ResourceType.StoredProcedure, path,
                procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
                requestHeaders, options);
            if (options != null) {
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }
            if (retryPolicy != null) {
                retryPolicy.onBeforeSendRequest(request);
            }
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // NOTE(review): the flatMap ignores its 'req' parameter and reuses the outer
            // 'request' — presumably addPartitionKeyInformation mutates and returns the same
            // instance; confirm before relying on 'req' being identical to 'request'.
            return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
                .map(response -> {
                    // Persist the session token from the response for session consistency.
                    this.captureSessionToken(request, response);
                    return toStoredProcedureResponse(response);
                }));
        } catch (Exception e) {
            logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
trigger, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
QueryFeedOperationState state) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a user defined function, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request for a user defined function.
 * Argument failures are returned as an error Mono rather than thrown.
 *
 * @param udfLink             link to the UDF to delete; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user defined function, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a user defined function.
 * Argument failures are returned as an error Mono rather than thrown.
 *
 * @param udfLink             link to the UDF to read; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Enumerates the user defined functions of a collection as a paged feed.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedLink);
}
/**
 * Queries user defined functions with a raw query string; delegates to the
 * {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
/**
 * Queries user defined functions with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads a conflict resource, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a conflict, resolving partition key
 * information asynchronously before dispatch. Argument failures are returned
 * as an error Mono rather than thrown.
 *
 * @param conflictLink        link to the conflict; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the lambda ignores 'req' and keeps using the outer 'request' —
            // presumably addPartitionKeyInformation mutates and emits the same instance; confirm.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Enumerates the conflicts of a collection as a paged feed.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedLink);
}
/**
 * Queries conflicts with a raw query string; delegates to the {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
/**
 * Queries conflicts with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes a conflict resource, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request for a conflict, resolving partition key
 * information asynchronously before dispatch. Argument failures are returned
 * as an error Mono rather than thrown.
 *
 * @param conflictLink        link to the conflict; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the lambda ignores 'req' and keeps using the outer 'request' —
            // presumably addPartitionKeyInformation mutates and emits the same instance; confirm.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a user in a database, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Create request for a user. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param databaseLink              link of the owning database.
 * @param user                      the user payload.
 * @param options                   request options; may be null.
 * @param documentClientRetryPolicy policy passed through to the create pipeline.
 * @return a Mono emitting the created user response, or an error.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user in a database, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Upsert request for a user. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param databaseLink        link of the owning database.
 * @param user                the user payload.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the upserted user response, or an error.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the service request for a user CRUD operation under the given database.
 *
 * @param databaseLink  link of the owning database; must be non-empty.
 * @param user          the user payload; must be non-null and valid.
 * @param options       request options used to derive headers; may be null.
 * @param operationType the operation this request represents.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    final String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, resourcePath, user, headers, options);
}
/**
 * Replaces an existing user, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Replace request for a user, addressed via its self link.
 * Argument failures are returned as an error Mono rather than thrown.
 *
 * @param user                the user payload; must be non-null.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the replaced user response, or an error.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user, executing the call under a retry policy.
 */
// Added @Override for consistency with every other interface-method wrapper in this class
// (createUser, upsertUser, replaceUser, readUser all carry it).
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and sends the Delete request for a user. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param userLink            link to the user to delete; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a user. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param userLink            link to the user to read; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Enumerates the users of a database as a paged feed.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedLink);
}
/**
 * Queries users with a raw query string; delegates to the {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
/**
 * Queries users with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads a client encryption key, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a client encryption key. Argument
 * failures are returned as an error Mono rather than thrown.
 *
 * @param clientEncryptionKeyLink link to the key; must be non-empty.
 * @param options                 request options; may be null.
 * @param retryPolicyInstance     policy notified before send; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a client encryption key in a database, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Create request for a client encryption key. Argument
 * failures are returned as an error Mono rather than thrown.
 *
 * @param databaseLink              link of the owning database.
 * @param clientEncryptionKey       the key payload.
 * @param options                   request options; may be null.
 * @param documentClientRetryPolicy policy passed through to the create pipeline.
 * @return a Mono emitting the created key response, or an error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the service request for a client-encryption-key operation under the given database.
 *
 * @param databaseLink        link of the owning database; must be non-empty.
 * @param clientEncryptionKey the key payload; must be non-null and valid.
 * @param options             request options used to derive headers; may be null.
 * @param operationType       the operation this request represents.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    final String resourcePath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers, options);
}
/**
 * Replaces a client encryption key, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Replace request for a client encryption key, addressed
 * via its name-based link. Argument failures are returned as an error Mono
 * rather than thrown.
 *
 * @param clientEncryptionKey the key payload; must be non-null.
 * @param nameBasedLink       name-based link of the key to replace.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the replaced key response, or an error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Enumerates the client encryption keys of a database as a paged feed.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedLink);
}
/**
 * Queries client encryption keys with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission for a user, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    // FIX: previously a SECOND policy instance was created for the retry loop
    // (this.resetSessionTokenRetryPolicy.getRequestPolicy(null)), so the policy driving
    // retries was not the one decorating the request inside createPermissionInternal.
    // Reuse the same instance, matching every other wrapper in this class.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds and sends the Create request for a permission. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param userLink                  link of the owning user.
 * @param permission                the permission payload.
 * @param options                   request options; may be null.
 * @param documentClientRetryPolicy policy passed through to the create pipeline.
 * @return a Mono emitting the created permission response, or an error.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a permission for a user, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Upsert request for a permission. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param userLink            link of the owning user.
 * @param permission          the permission payload.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the upserted permission response, or an error.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the service request for a permission operation under the given user.
 *
 * @param userLink      link of the owning user; must be non-empty.
 * @param permission    the permission payload; must be non-null and valid.
 * @param options       request options used to derive headers; may be null.
 * @param operationType the operation this request represents.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String resourcePath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, resourcePath, permission, headers, options);
}
/**
 * Replaces an existing permission, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Replace request for a permission, addressed via its
 * self link. Argument failures are returned as an error Mono rather than thrown.
 *
 * @param permission          the permission payload; must be non-null.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the replaced permission response, or an error.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a permission, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Delete request for a permission. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param permissionLink      link to the permission to delete; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a permission, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for a permission. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param permissionLink      link to the permission to read; must be non-empty.
 * @param options             request options; may be null.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Enumerates the permissions of a user as a paged feed.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, feedLink);
}
/**
 * Queries permissions with a raw query string; delegates to the {@code SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, state);
}
/**
 * Queries permissions with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an offer (throughput resource), executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Replace request for an offer, addressed via its self
 * link. Argument failures are returned as an error Mono rather than thrown.
 *
 * @param offer                     the offer payload; must be non-null.
 * @param documentClientRetryPolicy policy passed through to the replace pipeline.
 * @return a Mono emitting the replaced offer response, or an error.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        // Offers carry no request options or custom headers.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads an offer, executing the call under a retry policy.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Builds and sends the Read request for an offer. Argument failures are
 * returned as an error Mono rather than thrown.
 *
 * @param offerLink           link to the offer; must be non-empty.
 * @param retryPolicyInstance policy notified before send; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        // The cast disambiguates the overload taking a header map vs. request options.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        // Give the retry policy a chance to stamp the request before send.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Enumerates all offers of the account as a paged feed.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    final String feedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, feedLink);
}
/**
 * Convenience overload: unwraps the query options from the operation state and
 * delegates to the options-based read-feed helper.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
/**
 * Runs a non-document ReadFeed under a retry policy; one policy instance is
 * shared between request decoration and the retry loop.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicyInstance),
        retryPolicyInstance);
}
/**
 * Core pagination loop for non-document ReadFeed operations. Builds one
 * service request per page (carrying the continuation token) and maps each raw
 * response into a typed {@code FeedResponse} page.
 *
 * @param options      query options; a default instance is used when null.
 * @param resourceType the (non-Document) resource type being enumerated.
 * @param klass        item type for deserialization.
 * @param resourceLink feed link to read from.
 * @param retryPolicy  policy notified before each page request is sent.
 * @return a Flux of feed pages.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {
    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 signals "use service default page size".
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    assert(resourceType != ResourceType.Document);
    // Invoked once per page: continuation token is null for the first page.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    // Executes a page request and converts the raw payload into a typed page.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));
    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
/**
 * Queries offers with a raw SQL string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, state);
}
/**
 * Queries offers with a parameterized query spec via the generic query pipeline.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
/**
 * Reads the database account metadata, retrying via a session-token-reset policy.
 *
 * @return a Mono emitting the {@link DatabaseAccount}
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Issues the DatabaseAccount read request and maps the raw response to the model type.
 *
 * @param documentClientRetryPolicy retry policy used by the read pipeline
 * @return a Mono emitting the {@link DatabaseAccount}, or the synchronous failure as an error
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// the DatabaseAccount read targets the account root, hence the empty resource path
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Returns the session container tracking per-partition session tokens. */
public Object getSession() {
return this.sessionContainer;
}
/**
 * Replaces the session container.
 *
 * @param sessionContainer must be a {@link SessionContainer}; the cast throws otherwise
 */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the client-side collection metadata cache. */
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
/** Returns the partition-key-range metadata cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/** Returns the global endpoint manager used for regional routing. */
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
/**
 * Builds a fresh {@link AddressSelector} over the client's address resolver.
 * NOTE(review): a new instance is created per call — presumably cheap; confirm callers
 * do not assume a shared instance.
 */
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
/**
 * Reads the database account metadata from one specific regional endpoint (used for
 * endpoint discovery/health), bypassing normal endpoint selection via an endpoint override.
 * As a side effect, refreshes {@code useMultipleWriteLocations} from the returned account.
 *
 * @param endpoint the regional endpoint to query
 * @return a Flux emitting the {@link DatabaseAccount} read from that endpoint
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// force the request to the specific endpoint instead of the resolved one
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
// multi-write is effective only when both the policy and the account allow it
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Selects the store model (gateway vs. direct) for a request. Certain resource types and
 * operations must be routed through the gateway even when the client connectivity mode is
 * direct (e.g. offers, metadata create/delete, cross-partition queries without a resolved
 * partition key).
 *
 * @param request the request being routed
 * @return the {@link RxStoreModel} that should process the request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// explicit per-request override always wins
if (request.useGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// resource types that are always gateway-routed (scripts only when not executing them)
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
// metadata writes go through the gateway; data-plane writes go direct
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// queries/feeds over documents without a partition-key-range identity or partition key
// header cannot be routed directly and must fan out via the gateway
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Shuts the client down exactly once: releases the endpoint manager, store client factory,
 * HTTP client, CPU monitor registration and (if enabled) the throughput control store.
 * Subsequent calls only log a warning; the method is idempotent via the {@code closed} flag.
 */
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
// getAndSet guarantees the shutdown sequence runs at most once
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
/** Returns the JSON item deserializer used for typed item payloads. */
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Registers a throughput control group. On first use, lazily creates the
 * {@link ThroughputControlStore} and attaches it to the store model matching the
 * connection mode; the method is synchronized so the lazy init is race-free.
 *
 * @param group the control group to enable; must not be null
 * @param throughputQueryMono mono resolving the provisioned throughput for the group
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
// first caller wins the one-time store initialization
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
/** Delegates proactive connection warm-up and cache initialization to the store model. */
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
/** Returns the account's default consistency level as reported by the gateway configuration. */
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/**
 * Configures the fault injector provider on the transport layers. In direct mode the
 * direct store model and address resolver are wired as well; the gateway proxy is always
 * wired since some requests are gateway-routed regardless of mode.
 *
 * @param injectorProvider the fault injector provider; must not be null
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
/** Records that proactive connection warm-up finished for the given containers. */
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
/** Records that proactive connection warm-up started for the given containers. */
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
/** Returns the account master key or resource token this client authenticates with. */
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query spec that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<selector> = @pkValue}.
 *
 * @param partitionKey the partition key whose value is bound to {@code @pkValue}
 * @param partitionKeySelector property-path selector (e.g. {@code ["pk"]}) appended to the alias
 * @return the query spec with the partition-key value bound as a parameter
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {

    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);

    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));

    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the feed ranges (one per physical partition) of a container.
 *
 * @param collectionLink link of the container
 * @param forceRefresh when true, bypasses the cached partition-key-range list
 * @return a Mono emitting the list of feed ranges
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
// retries once through the collection cache when the collection was recreated (stale rid)
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink, forceRefresh),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection, then maps its overlapping partition-key-ranges (over the full
 * key space) to feed ranges.
 *
 * @param request metadata request used for collection resolution and diagnostics
 * @param collectionLink link of the container; must be non-empty
 * @param forceRefresh when true, forces a refresh of the partition-key-range cache
 * @return a Mono emitting the feed ranges
 */
private Mono<List<FeedRange>> getFeedRangesInternal(
RxDocumentServiceRequest request,
String collectionLink,
boolean forceRefresh) {
logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// full key-space range => one PartitionKeyRange per physical partition
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
forceRefresh,
null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts a partition-key-range list into feed ranges. A null list means the cached
 * collection metadata is stale: the name cache is flagged for refresh and an
 * {@link InvalidPartitionException} is thrown so the retry policy can re-resolve.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {

    List<PartitionKeyRange> partitionKeyRanges = partitionKeyRangeListValueHolder.v;
    if (partitionKeyRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }

    List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : partitionKeyRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/** Wraps a partition-key-range's EPK range as a {@link FeedRangeEpkImpl}. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number
 * generator ({@link ThreadLocalRandom}).
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    long msb = random.nextLong();
    long lsb = random.nextLong();
    return randomUuid(msb, lsb);
}
/**
 * Builds a version-4 UUID from two random longs by forcing the version and variant bits.
 *
 * @param msb candidate most-significant bits
 * @param lsb candidate least-significant bits
 * @return UUID with version nibble set to 4 and the variant bits set to 10 (IETF)
 */
static UUID randomUuid(long msb, long lsb) {
// clear the version nibble, then set it to 0b0100 (version 4)
msb &= 0xffffffffffff0fffL;
msb |= 0x0000000000004000L;
// clear the top two variant bits, then set them to 0b10
lsb &= 0x3fffffffffffffffL;
lsb |= 0x8000000000000000L;
return new UUID(msb, lsb);
}
/**
 * Convenience overload of the availability-strategy wrapper that uses this client as the
 * diagnostics factory.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
/**
 * Wraps a document point operation with the threshold-based availability strategy
 * ("hedging"): the operation is started in the first applicable region, and after
 * staggered delays additional attempts are started in the other applicable regions with
 * all other regions excluded. The first non-transient result (success or terminal error)
 * wins; transient errors keep the race open. Diagnostics from all attempts are merged
 * into the caller-visible request options via a {@link ScopedDiagnosticsFactory}.
 *
 * @param resourceType must be {@link ResourceType#Document}
 * @param operationType point operation type (used for logging and applicability checks)
 * @param callback executes one attempt of the operation
 * @param initialRequestOptions caller options; cloned per regional attempt
 * @param idempotentWriteRetriesEnabled whether write hedging is allowed
 * @param innerDiagnosticsFactory diagnostics factory hedged attempts are scoped under
 * @return a Mono emitting the winning response, or the first non-transient error
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// fewer than two candidate regions => no hedging, run the plain operation
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// first attempt: runs across all regions, only wrapping CosmosExceptions
// so transient failures complete this mono and let a hedged attempt win
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// hedged attempt: pin to this region by excluding all other applicable regions
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
// only non-transient errors materialize as values here; transient ones stay errors
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// stagger: threshold + (attemptIndex - 1) * thresholdStep
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// race all attempts; the first emitted value (success or terminal error) wins
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no source emitted a value;
// dig out the first real CosmosException from the composite cause
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
/** True when the unwrapped (reactive-composite-free) throwable is a {@link CosmosException}. */
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
/**
 * True when the unwrapped throwable is a {@link CosmosException} whose status/sub-status
 * combination is considered non-transient for hedging purposes.
 */
private static boolean isNonTransientCosmosException(Throwable t) {
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = (CosmosException) unwrapped;
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
/**
 * Computes the exclude-regions list for a hedged attempt pinned to {@code currentRegion}:
 * the caller's original exclusions plus every other applicable region.
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    List<String> merged = new ArrayList<>();
    if (initialExcludedRegions != null) {
        merged.addAll(initialExcludedRegions);
    }

    for (String applicableRegion : applicableRegions) {
        if (applicableRegion.equals(currentRegion)) {
            continue; // keep only the region this attempt should target
        }
        merged.add(applicableRegion);
    }

    return merged;
}
/**
 * Decides whether a status/sub-status pair is terminal for hedging: successful responses,
 * client-side operation timeouts, deterministic client errors (400/401/405/409/412/413)
 * and genuine 404/0 not-found results end the race; everything else is treated as
 * transient and keeps other regional attempts alive.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // any non-error status is a final result
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }

    boolean isClientOperationTimeout =
        statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;

    boolean isDeterministicClientError =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;

    // 404 with sub-status 0 is a real "document not found", not a transient routing miss
    boolean isDefinitiveNotFound =
        statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;

    return isClientOperationTimeout || isDeterministicClientError || isDefinitiveNotFound;
}
/** Returns the override diagnostics context when supplied, otherwise this client. */
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints, ordered by the preferred-region list if any.
 *
 * @param operationType the operation type (read-only vs. write determines the endpoint set)
 * @param excludedRegions regions to exclude from the result; may be null
 * @return the applicable endpoints ordered by preference, with null entries removed; an
 *         empty list when the operation is neither a read nor a write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes null entries from the endpoint list in place and returns the same list
 * (or the shared empty list when the argument is null).
 *
 * @param orderedEffectiveEndpointsList mutable endpoint list; may be null
 * @return the list with nulls removed, never null
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // removeIf performs the same in-place null removal as the original manual index loop,
    // without the index bookkeeping
    orderedEffectiveEndpointsList.removeIf(uri -> uri == null);
    return orderedEffectiveEndpointsList;
}
/**
 * Overload that pulls the excluded regions from the request options and delegates to the
 * list-based overload.
 */
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {

    List<String> excludedRegions = options.getExcludeRegions();
    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        excludedRegions);
}
/**
 * Determines the ordered list of regions hedging may speculate in. Returns the empty list
 * (disabling hedging) unless all preconditions hold: an enabled end-to-end policy with a
 * threshold-based availability strategy, a document operation, and — for writes —
 * idempotent write retries plus multi-write account support. Otherwise maps the
 * applicable endpoints to region names, filtering the (case-insensitively normalized)
 * excluded regions.
 *
 * @return ordered region names eligible for speculation; empty when hedging is not applicable
 */
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
// write hedging requires opt-in: retries must be provably idempotent
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// normalize exclusions to lower case for a case-insensitive membership check
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
/**
 * Feed/query counterpart of the point-operation hedging wrapper: starts the feed
 * operation in the first applicable region and, after staggered threshold delays, in each
 * remaining region (with the other regions excluded on the cloned request). The first
 * non-transient result wins; hedging is skipped when fewer than two regions apply.
 *
 * @param resourceType must be {@link ResourceType#Document} (asserted)
 * @param operationType feed operation type (logging and applicability checks)
 * @param retryPolicyFactory supplies a retry policy per attempt
 * @param req the request template; cloned per regional attempt
 * @param feedOperation executes one attempt of the feed operation
 * @return a Mono emitting the winning result, or the first non-transient error
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// fewer than two candidate regions => run the plain feed operation without hedging
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// first attempt: any CosmosException becomes a value so it can win the race
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// hedged attempt: pin to this region by excluding all other applicable regions
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
// only non-transient errors become values; transient ones keep the race open
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// stagger: threshold + (attemptIndex - 1) * thresholdStep
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// race all attempts; first emitted value wins
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no source emitted a value;
// surface the first real CosmosException found among the composed causes
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
/**
 * Callback representing one attempt of a document point operation; invoked once per
 * applicable region by the availability-strategy wrapper.
 */
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Immutable either-type used by point-operation hedging: holds exactly one of a
 * successful response or a terminal (non-transient) {@link CosmosException}, so failed
 * attempts can still "win" {@code Mono.firstWithValue}.
 */
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** True when this result wraps a terminal error instead of a response. */
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
/**
 * Immutable either-type used by feed-operation hedging: holds exactly one of a
 * successful result of type {@code T} or a terminal (non-transient)
 * {@link CosmosException}, so failed attempts can still "win" {@code Mono.firstWithValue}.
 */
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** True when this result wraps a terminal error instead of a response. */
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
/**
 * Diagnostics factory that records every {@link CosmosDiagnostics} it creates so the
 * diagnostics of all hedged attempts can later be merged, exactly once, into a single
 * diagnostics context (the winner's, or one supplied by the caller's request options).
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
// guards merge(): only the first merge call has any effect
private final AtomicBoolean isMerged = new AtomicBoolean(false);
// delegate that actually creates diagnostics
private final DiagnosticsClientContext inner;
// every diagnostics instance handed out by this factory, in creation order
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
// when true, feed diagnostics are flagged as captured so PagedFlux does not re-emit them
private final boolean shouldCaptureAllFeedDiagnostics;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return this.mostRecentlyCreatedDiagnostics.get();
}
/**
 * Merges into the context snapshot carried by the request options (if any), otherwise
 * falls back to the first context found among the created diagnostics.
 */
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
/**
 * One-shot merge: attaches every context-less, non-empty diagnostics recorded by this
 * factory to the target context. Later calls are no-ops.
 */
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
// no explicit target: use the first context already attached to any diagnostics
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
if (ctx == null) {
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
// mark as captured so the paged flux does not surface the same diagnostics again
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
/** Clears recorded diagnostics and re-arms the factory for another merge cycle. */
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics =
diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
this.mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError();
if (databaseRefreshErrorSnapshot != null) {
logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot
);
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
databaseRefreshErrorSnapshot);
} else {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token "
+ "is valid. More info: https:
}
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
private void updateGatewayProxy() {
(this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
(this.gatewayProxy).setCollectionCache(this.collectionCache);
(this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
(this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
.withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
.withProxy(this.connectionPolicy.getProxy())
.withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
} else {
diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
return HttpClient.createFixed(httpClientConfig);
}
}
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
@Override
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
@Override
public String getMachineId() {
if (this.diagnosticsClientConfig == null) {
return null;
}
return ClientTelemetry.getMachineId(diagnosticsClientConfig);
}
@Override
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return mostRecentlyCreatedDiagnostics.get();
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case ClientEncryptionKey:
return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
if (options == null) {
return null;
}
return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
if (options == null) {
return null;
}
return options.getOperationContextAndListenerTuple();
}
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(
feedResponseFlux,
endToEndPolicyConfig,
options,
isQueryCancelledOnTimeout,
diagnosticsClientContext);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Attaches diagnostics to the given query exception.
 *
 * <p>If the client context already tracks a most-recently-created diagnostics instance, that one
 * is attached directly. Otherwise the diagnostics of all requests cancelled for this query are
 * merged pairwise (via their client-side request statistics) and the aggregate is attached.
 *
 * @param requestOptions          query options carrying the cancelled-request diagnostics tracker.
 * @param exception               the exception to decorate with diagnostics.
 * @param diagnosticsClientContext client context used to look up the latest diagnostics.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        // Fall back to merging diagnostics captured for requests cancelled earlier in this query.
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);
        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregratedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);
                        // BUGFIX: this previously read the statistics from 'first' a second time,
                        // so the statistics of 'toBeMerged' were never folded into the aggregate.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);
                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get();
            BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
        }
    }
}
/**
 * Wraps the feed-response flux with the end-to-end operation timeout. On a reactor
 * {@link TimeoutException} the error is mapped to a Cosmos cancellation exception (a special
 * negative-timeout exception when the configured timeout is negative) and the merged query
 * diagnostics are attached; every other error passes through untouched.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout,
    DiagnosticsClientContext diagnosticsClientContext) {

    final Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    final boolean isNegativeTimeout = endToEndTimeout.isNegative();

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (!(throwable instanceof TimeoutException)) {
                return throwable;
            }
            // A negative timeout yields a dedicated exception type; otherwise the operation
            // is reported as cancelled.
            CosmosException cancellationException = isNegativeTimeout
                ? getNegativeTimeoutException(null, endToEndTimeout)
                : new OperationCancelledException();
            cancellationException.setStackTrace(throwable.getStackTrace());
            isQueryCancelledOnTimeout.set(true);
            applyExceptionToMergedDiagnosticsForQuery(
                requestOptions, cancellationException, diagnosticsClientContext);
            return cancellationException;
        });
}
/**
 * Queries databases with a raw SQL query string.
 *
 * @param query the SQL query text.
 * @param state the feed/query operation state.
 * @return a flux of database feed-response pages.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    // Wrap the raw text in a SqlQuerySpec and delegate to the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryDatabases(querySpec, state);
}
/**
 * Queries databases at the account level using the given query specification.
 *
 * @param querySpec the SQL query specification.
 * @param state the feed/query operation state.
 * @return a flux of database feed-response pages.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a document collection in the given database, retrying per the
 * session-token-reset retry policy.
 *
 * @param databaseLink link of the parent database.
 * @param collection   the collection definition to create.
 * @param options      optional request options.
 * @return a mono emitting the resource response for the created collection.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // A fresh retry policy instance is created per operation.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs the actual collection-create request: validates arguments, serializes the
// collection payload (timing it for serialization diagnostics), issues the request and
// captures the returned session token. Synchronous failures become an error Mono.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the JSON serialization so it can be surfaced in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // Record the session token from the response so later session-consistent reads can use it.
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing document collection, retrying per the
 * session-token-reset retry policy.
 *
 * @param collection the collection definition to replace (self-link identifies the target).
 * @param options    optional request options.
 * @return a mono emitting the resource response for the replaced collection.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs the actual collection-replace request; mirrors createCollectionInternal but
// targets the collection's self-link and guards against a null resource in the response
// before capturing the session token.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the JSON serialization so it can be surfaced in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // The resource may be absent from the response (e.g. minimal content); only
                // record a session token when it is present.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a document collection, retrying per the session-token-reset retry policy.
 *
 * @param collectionLink link of the collection to delete.
 * @param options        optional request options.
 * @return a mono emitting the resource response for the deleted collection.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs the actual collection-delete request. No payload is serialized for a delete,
// so no serialization diagnostics are recorded here.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Issues a DELETE through the store proxy after populating standard headers.
// When this send is a retry, the retry context's end time is refreshed first.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            boolean isRetriedRequest = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedRequest) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Issues the delete-all-items-by-partition-key operation (a POST) through the store proxy.
// When this send is a retry, the retry context's end time is refreshed first.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            boolean isRetriedRequest = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedRequest) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Issues a GET through the store proxy after populating standard headers.
// When this send is a retry, the retry context's end time is refreshed first.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            boolean isRetriedRequest = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedRequest) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Issues a read-feed GET through the store proxy after populating standard headers.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
// Issues a query (POST) through the store proxy, capturing the response's session token
// before handing the response back to the caller.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> this.getStoreProxy(populatedRequest)
            .processMessage(populatedRequest)
            .map(serviceResponse -> {
                this.captureSessionToken(populatedRequest, serviceResponse);
                return serviceResponse;
            }));
}
/**
 * Reads a document collection, retrying per the session-token-reset retry policy.
 *
 * @param collectionLink link of the collection to read.
 * @param options        optional request options.
 * @return a mono emitting the resource response for the collection.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Performs the actual collection-read request. Synchronous failures become an error Mono.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of collections under the given database.
 *
 * @param databaseLink link of the parent database; must be non-empty.
 * @param state        the feed/query operation state.
 * @return a flux of collection feed-response pages.
 * @throws IllegalArgumentException if {@code databaseLink} is empty.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
/**
 * Queries collections under the given database with a raw SQL query string.
 *
 * @param databaseLink link of the parent database.
 * @param query        the SQL query text.
 * @param state        the feed/query operation state.
 * @return a flux of collection feed-response pages.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections under the given database using the given query specification.
 *
 * @param databaseLink link of the parent database.
 * @param querySpec    the SQL query specification.
 * @param state        the feed/query operation state.
 * @return a flux of collection feed-response pages.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string.
 * JsonSerializable values use the model bridge's serializer; everything else goes
 * through the shared Jackson mapper.
 *
 * @param objectArray the parameter values to serialize.
 * @return a JSON array string, e.g. {@code ["a",1]}.
 * @throws IllegalArgumentException if a value cannot be serialized to JSON.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    List<String> serializedParams = new ArrayList<>(objectArray.size());
    for (Object object : objectArray) {
        if (object instanceof JsonSerializable) {
            serializedParams.add(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                serializedParams.add(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serializedParams, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the HTTP request headers for an operation from client-level defaults plus the
// per-request options. Later sections may override client defaults (e.g. consistency level).
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    // Client-level defaults first.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the content-response-on-write preference can apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Custom headers from the options may add to (or overwrite) the defaults above.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response-on-write overrides the client-level setting.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency ETags.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Pre/post trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Throughput: an explicit offer throughput wins over an offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties only apply when no explicit offer throughput was set.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    // Dedicated gateway / integrated cache controls.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
/**
 * Returns the retry-policy factory that resets the session token on session-read failures.
 *
 * @return the session-token-reset retry policy factory.
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection from the cache, then stamps the partition-key header onto
// the request by delegating to the overload that accepts an already-resolving collection Mono.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionObs);
}
// Once the collection resolves, stamps the partition-key header onto the request via the
// synchronous worker overload, then emits the (mutated) request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        DocumentCollection resolvedCollection = collectionValueHolder.v;
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection);
        return request;
    });
}
// Computes the partition-key value for the request and stamps it both on the request object
// and as the x-ms-documentdb-partitionkey header. Sources, in precedence order:
//   1. PartitionKey.NONE from options  -> the collection's "none" partition key
//   2. an explicit partition key from options
//   3. no partition-key definition on the collection -> the empty partition key
//   4. extraction from the document payload (byte buffer or object), timed for diagnostics
// Otherwise the operation cannot proceed and an UnsupportedOperationException is thrown.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object objectDoc, RequestOptions options,
    DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Migration case: collection doesn't have partition key defined, so it is a legacy collection.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the partition key from the document payload itself.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // Rewind so the buffer can be re-read after an earlier serialization pass.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            // Should never happen: the enclosing branch guarantees one of the two is non-null.
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the partition-key extraction so it can be surfaced in serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the document write request: serializes the payload (timed for diagnostics),
// creates the service request with the proper headers, wires up e2e-timeout cancellation
// and excluded regions from the options, and finally resolves the collection so the
// partition-key header can be stamped on.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    OperationType operationType,
    DiagnosticsClientContext clientContextOverride) {

    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time the JSON serialization so it can be surfaced in diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        // Hook so an end-to-end timeout can mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the batch request for the given server batch payload: wraps the pre-built request
// body (timed for serialization diagnostics), creates the service request, wires up the
// e2e-timeout cancellation hook and excluded regions, and resolves the collection so the
// batch headers (partition key / PK range id, atomicity flags) can be stamped on.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        // Hook so an end-to-end timeout can mark this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        // Excluded regions are applied once here; a second, redundant assignment of the
        // same value further down has been removed.
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Stamps batch-specific headers on the request: the partition-key value (or PK range
// identity) depending on the concrete batch request type, plus the batch marker,
// atomicity, and continue-on-error flags.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
    ServerBatchRequest serverBatchRequest,
    DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        // All operations share a single partition key value.
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // The batch targets a whole partition key range instead of a single key.
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * Populates the standard headers (date, authorization, content type, capabilities and,
 * when needed, feed-range filtering headers) on the request.
 *
 * <p>NOTE: Caller needs to consume it by subscribing to this Mono in order for the request
 * to populate headers.
 *
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    // Key/token-based auth is computed synchronously here; AAD auth is resolved
    // asynchronously by populateAuthorizationHeader below.
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token must be URL-encoded before being sent as a header value.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }
    this.populateCapabilitiesHeader(request);
    // Default content types; only set when the caller did not provide one.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    // Feed-range requests need extra headers derived from the resolved collection
    // before the (possibly async) authorization header is applied.
    if (this.requiresFeedRangeFiltering(request)) {
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities to the service, unless the
// caller has already set the header explicitly.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
// Returns true when this request must be routed through feed-range filtering:
// only document/conflict feed-or-query requests that actually carry a feed
// range qualify.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean isFilterableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!isFilterableResource) {
        return false;
    }

    OperationType operationType = request.getOperationType();
    boolean isFeedOrQueryOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return isFeedOrQueryOperation && request.getFeedRange() != null;
}
/**
 * Populates the authorization header on the request when AAD token based
 * authorization is configured; for any other token type the request is
 * passed through unchanged (those headers are populated elsewhere).
 *
 * @param request the service request to decorate; must not be {@code null}.
 * @return a {@link Mono} emitting the (possibly decorated) request.
 * @throws IllegalArgumentException if {@code request} is {@code null}.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Nothing to add for non-AAD auth.
        return Mono.just(request);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Sets the AAD authorization header on a raw header collection when AAD
 * authorization is configured; otherwise returns the headers untouched.
 *
 * @param httpHeaders the headers to decorate; must not be {@code null}.
 * @return a {@link Mono} emitting the (possibly decorated) headers.
 * @throws IllegalArgumentException if {@code httpHeaders} is {@code null}.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
// Exposes the authorization scheme (e.g. primary key, resource token, AAD)
// this client instance was configured with.
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Resolves the authorization token for a request, honoring the configured
 * auth mechanisms in precedence order:
 * 1. a caller-supplied token resolver,
 * 2. a key credential (signature generated by the token provider),
 * 3. a single resource token supplied in place of a master key,
 * 4. the per-resource token map (built from a permission feed).
 *
 * @param resourceName address of the resource being accessed
 * @param resourceType type of the resource being accessed
 * @param requestVerb  HTTP verb of the request
 * @param headers      request headers used for signature computation
 * @param tokenType    requested token type (unused by some branches)
 * @param properties   optional caller properties, exposed read-only to the resolver
 * @return the authorization token to attach to the request
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
        // Highest precedence: delegate entirely to the user-supplied resolver.
        // Properties are wrapped unmodifiable so the resolver cannot mutate them.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        // Key credential: generate a key-based authorization signature.
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token was supplied in place of a master key; use it verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Database-account requests are authorized with the first token
            // obtained from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }

        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
// Maps an internal ResourceType to the public CosmosResourceType, falling
// back to SYSTEM for types with no public counterpart.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Stores the session token from the response headers in the session
// container, keyed by the originating request, for use by later requests.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Executes a create (POST) through the store proxy after populating headers.
// When this execution is itself a retry, the retry context's end time is
// refreshed first.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Executes an upsert: a POST flagged with the IS_UPSERT header. The session
// token of the response is captured before the response is surfaced.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            // The service treats a POST carrying this header as an upsert.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
// Executes a replace (PUT) through the store proxy after populating headers.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetry = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetry) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Executes a partial-document update (PATCH) through the store proxy after
// populating headers.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetry = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetry) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document, routing the point operation through the configured
 * availability strategy. Non-idempotent write retries are honored only when
 * explicitly enabled on the options.
 *
 * @param collectionLink link of the target collection
 * @param document the document payload to create
 * @param options optional request options (may be {@code null})
 * @param disableAutomaticIdGeneration when true, no id is generated for the document
 * @return a {@link Mono} emitting the created document's resource response
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
// Sets up the retry policy and scoped diagnostics for a single create
// attempt, then runs the operation under the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Scoped factory so all diagnostics created for this operation (including
    // retries) are collected together.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // No explicit partition key: wrap with a policy that retries on
        // partition-key mismatch (see PartitionKeyMismatchRetryPolicy).
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Builds the create request and executes it; synchronous failures during
// request construction are converted into an error Mono.
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a point-operation pipeline with the end-to-end timeout policy when
 * one is enabled. A negative configured timeout fails fast (the request is
 * never issued); otherwise a reactive timeout is applied and any resulting
 * {@code TimeoutException} is translated into a cancellation exception
 * carrying the most recent diagnostics.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

    // Record the effective policy on the options so downstream code can see it.
    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Make sure at least one diagnostics instance exists before
            // attaching it to the failure.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }

        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }

    return rxDocumentServiceResponseMono;
}
// Translates a reactor TimeoutException raised by the E2E timeout operator
// into an OperationCancelledException, invoking the mark-timeout hook (so the
// in-flight request context learns about the cancellation) and attaching the
// latest diagnostics. Any other throwable is passed through untouched.
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

    // reactor wraps checked exceptions; unwrap before inspecting the type.
    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if (unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());

        // Notify the request context (if the hook was installed) that the
        // operation was cancelled due to the E2E timeout.
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }

        // Guarantee a diagnostics instance exists before attaching it.
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }

        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }

    return throwable;
}
// Builds the cancellation exception surfaced when the caller configured a
// negative end-to-end timeout, tagging it with the dedicated sub-status code
// and attaching any diagnostics captured so far.
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    CosmosException exception = new OperationCancelledException(
        String.format("Negative timeout '%s' provided.", negativeTimeout),
        null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);

    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }
    return exception;
}
/**
 * Upserts a document, routing the point operation through the configured
 * availability strategy. Non-idempotent write retries are honored only when
 * explicitly enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
// Sets up the retry policy and scoped diagnostics for a single upsert
// attempt, then runs the operation under the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    // Scoped factory so diagnostics across retries are collected together.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // No explicit partition key: add retry-on-partition-key-mismatch behavior.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
// Builds the upsert request and executes it; synchronous failures during
// request construction are converted into an error Mono.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> reqObs =
            getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride);

        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document identified by its link, routing the point operation
 * through the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
// Sets up the retry policy and scoped diagnostics for a single replace
// attempt, then runs the operation under the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Derive the collection link from the document link so the
        // partition-key-mismatch retry policy can refresh the right collection.
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
/**
 * Validates the replace arguments, converts the payload into a
 * {@link Document}, and delegates to the typed overload.
 *
 * @param documentLink self-link of the document being replaced; must be non-empty
 * @param document the replacement payload; must not be {@code null}
 * @param options request options (callers normalize to non-null)
 * @param retryPolicyInstance retry policy associated with this operation
 * @param endToEndPolicyConfig effective end-to-end latency policy (not used
 *        directly here; kept for signature symmetry with the other overloads)
 * @param clientContextOverride diagnostics context override for this call
 * @return a {@link Mono} emitting the replaced document's resource response,
 *         or an error if validation/serialization fails
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // Pass the exception as the final argument so the stack trace is
        // preserved in the log, matching createDocumentInternal and
        // upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self-link, routing the point operation
 * through the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
// Replace overload keyed off the document's self-link. Unlike the
// link-based overload, this variant does not apply the end-to-end timeout
// wrapper and does not use a scoped diagnostics factory.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        // NOTE(review): 'options' may be null here and is forwarded as-is to
        // PartitionKeyMismatchRetryPolicy (other overloads pass a normalized
        // non-null RequestOptions) — confirm the policy tolerates null.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the document argument and delegates to the link-based overload
 * using the document's self-link.
 *
 * @param document the document to replace; must not be {@code null}
 * @param options request options (may be {@code null})
 * @param retryPolicyInstance retry policy associated with this operation
 * @param endToEndPolicyConfig effective end-to-end latency policy (not used
 *        directly here; kept for signature symmetry with the other overloads)
 * @param clientContextOverride diagnostics context override for this call
 * @return a {@link Mono} emitting the replaced document's resource response,
 *         or an error if validation fails
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // Fixed copy-paste in the message ("database" -> "document") and pass
        // the exception so the stack trace is preserved in the log.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Core replace implementation: serializes the document (recording
// serialization diagnostics), builds the PUT request, resolves the target
// collection, attaches partition-key information, and executes the replace.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    Instant serializationStartTimeUTC = Instant.now();

    if (options != null) {
        // Stamp the tracking id into the payload so the write can be
        // correlated later.
        String trackingId = options.getTrackingId();

        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }

    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Install the hook the E2E timeout machinery uses to flag this request
        // as cancelled, and propagate any region exclusions.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);

    // NOTE(review): the lambda ignores the emitted 'req' and reuses the outer
    // 'request' — this appears to rely on addPartitionKeyInformation mutating
    // and re-emitting the same instance; confirm (deleteDocumentInternal uses
    // the emitted value instead).
    return requestObs
        .flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}
// Resolves the effective end-to-end latency policy for a request: the
// request-level policy wins, falling back to the client-level default.
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
// Falls back to the client-wide policy when no per-request policy was given.
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies a set of patch operations to a document, routing the point
 * operation through the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
// Sets up the retry policy and scoped diagnostics for a single patch
// attempt, then runs the operation under the end-to-end timeout policy.
// Note: unlike create/upsert/replace, no partition-key-mismatch retry
// wrapper is applied here.
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Core patch implementation: serializes the patch operations (recording
// serialization diagnostics), builds the PATCH request, resolves the target
// collection, attaches partition-key information, and executes the patch.
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Install the hook the E2E timeout machinery uses to flag this request
        // as cancelled, and propagate any region exclusions.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);

    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // Content/document args are null here: the partition key for a patch is
    // taken from the options, not extracted from a payload.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);

    // NOTE(review): the lambda ignores the emitted 'req' and reuses the outer
    // 'request' — appears to rely on addPartitionKeyInformation mutating and
    // re-emitting the same instance; confirm.
    return requestObs
        .flatMap(req -> patch(request, retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document by link, routing the point operation through the
 * configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            null,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Deletes a document by link, additionally passing the item's node so the
 * partition key can be derived from it downstream.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
// Sets up the retry policy and scoped diagnostics for a single delete
// attempt, then runs the operation under the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                nonNullRequestOptions,
                requestRetryPolicy,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}
// Core delete implementation: builds the DELETE request, resolves the target
// collection, attaches partition-key information (from options or the
// supplied item node), and executes the delete.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Install the hook the E2E timeout machinery uses to flag this
            // request as cancelled, and propagate any region exclusions.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes all documents sharing a logical partition key.
 *
 * NOTE(review): the {@code partitionKey} parameter is not forwarded here —
 * the internal method appears to rely on the partition key carried inside
 * {@code options}; confirm against callers.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and executes the PartitionKey-typed DELETE request that removes all
// documents in a logical partition; the partition key itself is attached via
// addPartitionKeyInformation from the options.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a document by link, using this client as the diagnostics factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}
// Read variant that allows the caller to supply the diagnostics factory
// (used when the read is nested inside another instrumented operation).
// Reads never use non-idempotent write retries, hence the hard-coded false.
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
// Sets up the retry policy and scoped diagnostics for a single read attempt,
// then runs the operation under the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(
                documentLink,
                nonNullRequestOptions,
                retryPolicyInstance,
                scopedDiagnosticsFactory),
            retryPolicyInstance),
        scopedDiagnosticsFactory
    );
}
/**
 * Builds and issues the service request for a single document read.
 *
 * @param documentLink the document link; must be non-empty.
 * @param options request options; must be non-null (callers normalize this).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @param clientContextOverride diagnostics client context for this operation.
 * @return a {@link Mono} emitting the resource response; argument/setup failures are
 *         surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Allow the end-to-end timeout machinery to flag this request as cancelled-on-timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        // Use the request emitted after partition-key resolution (req) instead of capturing the
        // outer variable — consistent with the other *Internal methods in this class.
        return requestObs.flatMap(req ->
            this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Pass the throwable as the last argument so the stack trace is logged,
        // matching the logging style of the sibling document operations.
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all documents of a collection.
 *
 * <p>Implemented as a full-scan query ({@code SELECT * FROM r}) so that the regular
 * query pipeline (paging, diagnostics, retries) applies.</p>
 *
 * @param collectionLink the collection link; must be non-empty.
 * @param state feed-operation state carrying query options and diagnostics.
 * @param classOfT the item type to deserialize into.
 * @return a {@link Flux} of feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
/**
 * Reads many documents identified by (id, partition key) pairs in one logical operation.
 *
 * <p>Items are grouped by the partition key range that owns their effective partition key.
 * Ranges holding a single item are served via point reads; ranges holding multiple items
 * are served via parameterized queries (see {@link #getRangeQueryMap}). The per-range
 * results are merged into a single aggregated {@link FeedResponse}.</p>
 *
 * @param itemIdentityList the (id, partition key) pairs to read.
 * @param collectionLink the collection link.
 * @param state feed-operation state carrying query options and diagnostics hooks.
 * @param klass the item type to deserialize into.
 * @return a {@link Mono} emitting one aggregated feed response with combined request
 *         charge, query metrics and diagnostics.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {

    // Scoped factory so diagnostics from all sub-operations can be merged into the caller's context.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );

    // Resolve the collection first — its partition key definition drives item grouping.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);

    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

            // Routing map maps each item's effective partition key to its owning range.
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Group items by the partition key range that owns them.
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // For hierarchical (MULTI_HASH) partition keys the supplied key must
                            // specify every path component.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }

                            String effectivePartitionKeyString =  PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);

                            //use routing map to find the partitionKeyRangeId of each
                            // effectivePartitionKey
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                            //group the itemKeyList based on partitionKeyRangeId
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }

                        });

                    //Create the range query map that contains the query to be run for that
                    // partitionkeyrange (only ranges with >1 item; single-item ranges become point reads)
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                    // Point reads for single-item ranges...
                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);

                    // ...queries for multi-item ranges, run concurrently and merged below.
                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));

                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            // Aggregate all pages into a single feed response: items,
                            // request charge, query metrics and client-side statistics.
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }

                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }

                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);

                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // Record the whole readMany as one 200 operation against the
                                // merged diagnostics context.
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );

                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }

                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                });
            }
        )
        .onErrorMap(throwable -> {
            // On CosmosException, attach the merged diagnostics context to the failure
            // before propagating so callers see full operation diagnostics.
            if (throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException)throwable;
                CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                if (diagnostics != null) {
                    state.mergeDiagnosticsContext();
                    CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();

                    if (ctx != null) {
                        ctxAccessor.recordOperation(
                            ctx,
                            cosmosException.getStatusCode(),
                            cosmosException.getSubStatusCode(),
                            0,
                            cosmosException.getRequestCharge(),
                            diagnostics,
                            throwable
                        );
                        diagnosticsAccessor
                            .setDiagnosticsContext(
                                diagnostics,
                                state.getDiagnosticsContextSnapshot());
                    }
                }

                return cosmosException;
            }

            return throwable;
        });
}
/**
 * Builds, per partition key range, the parameterized query that fetches that range's items.
 *
 * <p>Ranges with at most one item are skipped — those are served by point reads instead.
 * The query shape depends on the partition key definition: an id-only key, a hierarchical
 * (MULTI_HASH) key, or a regular single-path key.</p>
 *
 * @param partitionRangeItemKeyMap item identities grouped by owning partition key range.
 * @param partitionKeyDefinition the collection's partition key definition.
 * @return a map from range to the query spec covering that range's items (multi-item ranges only).
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges are handled via point reads
        }

        SqlQuerySpec spec;
        if (partitionKeySelector.equals("[\"id\"]")) {
            // Partition key is the id itself — an IN-list over ids suffices.
            spec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            spec = createReadManyQuerySpec(identities, partitionKeySelector);
        }
        rangeQueryMap.put(entry.getKey(), spec);
    }

    return rangeQueryMap;
}
/**
 * Builds a read-many query for collections whose partition key is the document id:
 * {@code SELECT * FROM c WHERE c.id IN ( @param0, ... )}.
 *
 * @param idPartitionKeyPairList the item identities; only ids are used since id == partition key.
 * @param partitionKeySelector unused here (kept for signature symmetry with the other builders).
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();

    // One positional parameter per id; names are joined into the IN-list below.
    for (int index = 0; index < idPartitionKeyPairList.size(); index++) {
        String parameterName = "@param" + index;
        parameters.add(new SqlParameter(parameterName, idPartitionKeyPairList.get(index).getId()));
        parameterNames.add(parameterName);
    }

    String queryText = "SELECT * FROM c WHERE c.id IN ( " + String.join(", ", parameterNames) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds a read-many query for a single-path partition key:
 * {@code SELECT * FROM c WHERE ( (c.id = @p1 AND  c["pk"] = @p0 ) OR ... )}.
 *
 * @param itemIdentities the (id, partition key) pairs to match.
 * @param partitionKeySelector bracketed selector produced by {@link #createPkSelector}.
 * @return the parameterized query spec; parameters come in (pk, id) pairs per item.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder queryText = new StringBuilder("SELECT * FROM c WHERE ( ");

    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);

        // Even parameter indices hold partition key values, odd ones hold ids.
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        if (i > 0) {
            queryText.append(" OR ");
        }
        queryText.append("(")
            .append("c.id = ").append(idParamName)
            .append(" AND ").append(" c").append(partitionKeySelector)
            .append(" = ").append(pkParamName)
            .append(" )");
    }

    queryText.append(" )");
    return new SqlQuerySpec(queryText.toString(), parameters);
}
/**
 * Builds a read-many query for a hierarchical (MULTI_HASH) partition key: each item
 * contributes a disjunct matching its id and every sub-partition-key component.
 *
 * @param itemIdentities the (id, partition key) pairs to match.
 * @param partitionKeyDefinition the collection's hierarchical partition key definition.
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {

    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    // Shared running counter so parameter names stay unique across pk components and ids.
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // NOTE(review): assumes the multi-hash partition key value renders as a single
        // string with components separated by '=' — confirm this matches the
        // serialization produced by getPartitionKeyObject for MULTI_HASH keys.
        String pkValueString = (String) pkValue;
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        // Pair each sub-key component with its definition path and a fresh parameter name.
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));

        // Disjunct: (c.id = @idParam AND c.<path0> = @p0 AND c.<path1> = @p1 ...)
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);

        // partition key def
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // substring(1) drops the leading '/' of the definition path.
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }

        queryStringBuilder.append(" )");

        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds the bracketed JSON-path selector for the collection's partition key; e.g. a
 * definition with paths ["/a", "/b"] yields {@code ["a"]["b"]}.
 *
 * @param partitionKeyDefinition the partition key definition; each path starts with '/'.
 * @return the concatenated selector used when composing read-many queries.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
        // NOTE(review): this replaces an embedded double quote with a lone backslash,
        // dropping the quote entirely; if the intent is to escape quotes inside the
        // selector, the replacement should presumably be "\\\"" — confirm before changing.
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Executes the per-range read-many queries (multi-item ranges only).
 *
 * <p>Returns an empty flux when no range requires a query. When an end-to-end latency
 * policy is enabled, the resulting flux is wrapped with timeout handling.</p>
 *
 * @param diagnosticsFactory scoped diagnostics factory shared with the overall readMany.
 * @param parentResourceLink the collection query link.
 * @param sqlQuery placeholder query spec (per-range specs come from {@code rangeQueryMap}).
 * @param options query request options; may be null.
 * @param klass the resource type to deserialize into.
 * @param resourceTypeEnum the resource type being queried.
 * @param collection the resolved collection.
 * @param rangeQueryMap per-range query specs produced by {@link #getRangeQueryMap}.
 * @return a flux of feed responses, one stream element per returned page.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    // Nothing to query — all ranges had a single item and are served by point reads.
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }

    UUID activityId = randomUuid();

    // Shared flag allowing the timeout wrapper to mark the query as cancelled.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

    RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .toRequestOptions(options);

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        // Enforce the configured end-to-end timeout on the whole query stream.
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }

    return feedResponseFlux;
}
/**
 * Issues point reads for every partition key range that holds exactly one requested item,
 * converting each result (or 404/0 miss) into a single-page {@link FeedResponse} so it
 * can be merged with the query results of {@link #queryForReadMany}.
 *
 * @param diagnosticsFactory scoped diagnostics factory shared with the overall readMany.
 * @param singleItemPartitionRequestMap item identities grouped by owning range.
 * @param resourceLink the collection query link; item ids are appended to it.
 * @param queryRequestOptions query options converted into per-read request options.
 * @param klass the item type to deserialize into.
 * @return a flux with one feed response per point read (empty page on a plain 404).
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            // Only ranges with exactly one item are point-read; others are queried.
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        // A plain 404 (item absent) is not a failure for readMany — carry the
                        // exception through the pair so it becomes an empty page below.
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        // Anything else propagates as an error.
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;
            if (cosmosException != null) {
                // Item not found: emit an empty page carrying the 404's diagnostics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                // Wrap the single read result as a one-item feed page with its diagnostics.
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());

                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }

            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents using a raw query string.
 *
 * @param collectionLink the collection link to query.
 * @param query the SQL query text (no parameters).
 * @param state feed-operation state carrying query options and diagnostics.
 * @param classOfT the item type to deserialize into.
 * @return a {@link Flux} of feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    // Wrap the raw text in a SqlQuerySpec and defer to the spec-based overload.
    return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}
/**
 * Creates an {@link IDocumentQueryClient} facade over this client for the query pipeline.
 *
 * <p>Most members delegate straight to the enclosing {@code RxDocumentClientImpl}'s caches
 * and configuration. When an operation context/listener tuple is supplied, query execution
 * additionally tags requests with the correlated activity id and notifies the listener on
 * request, response and error.</p>
 *
 * @param rxDocumentClientImpl unused directly; the facade captures the enclosing instance.
 * @param operationContextAndListenerTuple optional operation context plus listener; may be null.
 * @return the query-client facade.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return new IDocumentQueryClient () {

        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // TODO Auto-generated method stub
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Notify the listener around the request/response and propagate the
                // correlated activity id on the outgoing request headers.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);

                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            // TODO Auto-generated method stub
            return QueryCompatibilityMode.Default;
        }

        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }

        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): intentionally unimplemented here — returns null; callers in the
            // query pipeline apparently never invoke it on this facade. Confirm before use.
            return null;
        }
    };
}
/**
 * Queries documents using a parameterized query spec.
 *
 * @param collectionLink the collection link to query.
 * @param querySpec the parameterized SQL query.
 * @param state feed-operation state carrying query options and diagnostics.
 * @param classOfT the item type to deserialize into.
 * @return a {@link Flux} of feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    // Emit the query text through the dedicated query logger before dispatching.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Queries the change feed of a collection.
 *
 * @param collection the resolved target collection; must not be null.
 * @param changeFeedOptions change-feed request options (start position, mode, etc.).
 * @param classOfT the item type to deserialize into.
 * @return a {@link Flux} of change-feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");

    final ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);

    return changeFeedQuery.executeAsync();
}
/**
 * Bridges a paged-flux change-feed call onto {@link #queryDocumentChangeFeed}.
 *
 * @param collection the resolved target collection.
 * @param state change-feed operation state; its options are unwrapped and forwarded.
 * @param classOfT the item type to deserialize into.
 * @return a {@link Flux} of change-feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}
/**
 * Reads all documents of a single logical partition.
 *
 * <p>Resolves the collection, computes the effective partition key, looks up the owning
 * partition key range, and runs a logical-partition scan query pinned to that range.
 * When an availability strategy makes more than one region applicable, per-attempt
 * diagnostics are reset/merged around the inner flux.</p>
 *
 * @param collectionLink the collection link; must be non-empty.
 * @param partitionKey the logical partition to scan; must not be null.
 * @param state feed-operation state carrying query options and diagnostics hooks.
 * @param classOfT the item type to deserialize into.
 * @return a {@link Flux} of feed response pages for the partition's documents.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    // Clone the options so per-operation mutation (range pinning below) does not leak
    // back into the caller's state.
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());

    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);

    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);

    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // No hedging - just use the diagnostics context created above
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx));
    } else {
        // Hedging is happening - reset diagnostics before each attempt so each
        // speculative execution gets a clean context.
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
    }

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    // This should not got to backend
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {

        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        // WHERE c<pkSelector> = <partitionKey> scan over the logical partition.
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

        // Trying to put this logic as lambda, however lambda needs final and this is not final,
        // hence creating a new variable
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();

                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {

                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Pin the query to the single range owning this partition key.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    //use routing map to find the partitionKeyRangeId of each
                    // effectivePartitionKey
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        diagnosticsFactory,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT, //Document.class
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }

        // With hedging, merge diagnostics into the request options on every terminal
        // signal (next, error, cancel) so no attempt's diagnostics are lost.
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}
/**
 * Returns the client-wide cache of query plans keyed by query text.
 *
 * @return the query plan cache (shared, live map — not a copy).
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition key ranges feed of a collection.
 *
 * @param collectionLink the collection link; must be non-empty.
 * @param state feed-operation state carrying options and diagnostics.
 * @return a {@link Flux} of partition key range feed pages.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    // Partition key ranges are a metadata feed, not documents — use the non-document path.
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Reads the partition key ranges feed of a collection using raw query request options.
 *
 * @param collectionLink the collection link; must be non-empty.
 * @param options query request options; may be null.
 * @return a {@link Flux} of partition key range feed pages.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    // Same metadata feed as the state-based overload, driven by plain options.
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}
/**
 * Validates inputs and builds a service request targeting the stored-procedures
 * feed of the given collection.
 *
 * @param collectionLink the parent collection link; must be non-empty.
 * @param storedProcedure the stored procedure payload; must be non-null and valid.
 * @param options request options; may be null.
 * @param operationType the operation (e.g. Create, Upsert) the request is for.
 * @return the assembled {@link RxDocumentServiceRequest}.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }

    validateResource(storedProcedure);

    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Validates inputs and builds a service request targeting the user-defined-functions
 * feed of the given collection.
 *
 * @param collectionLink the parent collection link; must be non-empty.
 * @param udf the user-defined function payload; must be non-null and valid.
 * @param options request options; may be null.
 * @param operationType the operation (e.g. Create, Upsert) the request is for.
 * @return the assembled {@link RxDocumentServiceRequest}.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }

    validateResource(udf);

    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure in a collection, retrying via the
 * session-token-reset retry policy.
 *
 * @param collectionLink the parent collection link.
 * @param storedProcedure the stored procedure to create.
 * @param options request options; may be null.
 * @return a {@link Mono} emitting the created stored procedure's resource response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a stored procedure.
 *
 * @param collectionLink the parent collection link.
 * @param storedProcedure the stored procedure payload.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a {@link Mono} emitting the resource response; setup failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            // Let the retry policy capture per-request state before the first attempt.
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure, retrying via the
 * session-token-reset retry policy.
 *
 * @param storedProcedure the stored procedure with updated content; its self-link targets the resource.
 * @param options request options; may be null.
 * @return a {@link Mono} emitting the replaced stored procedure's resource response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a stored procedure.
 *
 * @param storedProcedure the stored procedure payload; must be non-null and valid.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a {@link Mono} emitting the resource response; setup failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        // Replace addresses the resource via its self-link rather than the parent feed.
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure by link, retrying via the
 * session-token-reset retry policy.
 *
 * @param storedProcedureLink the stored procedure link.
 * @param options request options; may be null.
 * @return a {@link Mono} emitting the deletion's resource response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a stored procedure.
 *
 * @param storedProcedureLink the stored procedure link; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a {@link Mono} emitting the resource response; setup failures become an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure by link, retrying via the
 * session-token-reset retry policy.
 *
 * @param storedProcedureLink the stored procedure link.
 * @param options request options; may be null.
 * @return a {@link Mono} emitting the stored procedure's resource response.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readStoredProcedure: validates the link, builds the Read
// request, and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(storedProcedureLink)) {
throw new IllegalArgumentException("storedProcedureLink");
}
logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
} catch (Exception e) {
logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    // Read-feed over all stored procedures of the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
QueryFeedOperationState state) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}
// Runs a SQL query against the stored procedures of a collection via the shared query pipeline.
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    // Executes a stored procedure with the given parameters under the
    // session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    // Executes a transactional batch against a collection; retries are driven by
    // the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Core implementation of executeStoredProcedure: builds an ExecuteJavaScript request whose
// body is the serialized parameter list, resolves partition-key information, then issues
// the call and captures the returned session token before mapping to a response.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Empty or null parameter lists produce an empty request body.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the flatMap deliberately reuses the outer `request` rather than `req`;
// presumably addPartitionKeyInformation mutates and re-emits the same instance — verify.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Core implementation of executeBatchRequest: builds the batch document request, issues it,
// and parses the service response back into a CosmosBatchResponse. Synchronous failures
// are surfaced as Mono.error.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
ServerBatchRequest serverBatchRequest,
RequestOptions options,
DocumentClientRetryPolicy requestRetryPolicy,
boolean disableAutomaticIdGeneration) {
try {
logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
return responseObservable
.map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
} catch (Exception ex) {
logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
return Mono.error(ex);
}
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    // Creates a trigger in a collection under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Core implementation of createTrigger: builds the Create request via getTriggerRequest
// and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
trigger.getId());
RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
OperationType.Create);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Builds the service request for a trigger operation (e.g. Create) under a collection:
// validates arguments, validates the resource, and assembles path + headers.
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, resourcePath, trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Replaces an existing trigger under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceTrigger: validates the resource, builds the Replace
// request against the trigger's self link, and issues it.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (trigger == null) {
throw new IllegalArgumentException("trigger");
}
logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
RxDocumentClientImpl.validateResource(trigger);
String path = Utils.joinPath(trigger.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Deletes a trigger under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of deleteTrigger: validates the link, builds the Delete request,
// and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Reads a single trigger under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readTrigger: validates the link, builds the Read request,
// and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    // Read-feed over all triggers of the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
QueryFeedOperationState state) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}
// Runs a SQL query against the triggers of a collection via the shared query pipeline.
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    // Creates a UDF in a collection under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Core implementation of createUserDefinedFunction: builds the Create request via
// getUserDefinedFunctionRequest and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    // Replaces an existing UDF under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceUserDefinedFunction: validates the resource, builds the
// Replace request against the UDF's self link, and issues it.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    // Deletes a UDF under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of deleteUserDefinedFunction: validates the link, builds the
// Delete request, and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    // Reads a single UDF under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readUserDefinedFunction: validates the link, builds the Read
// request, and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    // Read-feed over all user-defined functions of the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
String query,
QueryFeedOperationState state) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}
// Runs a SQL query against the UDFs of a collection via the shared query pipeline.
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Reads a single conflict resource under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readConflict: builds the Read request, resolves partition-key
// information, then issues the read. Unlike most siblings, onBeforeSendRequest runs
// inside the flatMap, after partition-key resolution.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores `req` and reuses the outer `request`; presumably the
// same instance is emitted — verify against addPartitionKeyInformation.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    // Read-feed over all conflicts of the given collection.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
QueryFeedOperationState state) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}
// Runs a SQL query against the conflicts of a collection via the shared query pipeline.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Deletes a conflict resource under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of deleteConflict: builds the Delete request, resolves partition-key
// information, then issues the delete inside the flatMap (mirrors readConflictInternal).
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores `req` and reuses the outer `request` — same pattern
// as readConflictInternal; verify the emitted request is the same instance.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Creates a user in a database under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Core implementation of createUser: builds the Create request via getUserRequest and
// issues it; synchronous failures become Mono.error. Note: no onBeforeSendRequest hook
// here, unlike upsertUserInternal.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Upserts a user into a database under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Core implementation of upsertUser: builds the Upsert request via getUserRequest and
// issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Builds the service request for a user operation (Create/Upsert) under a database:
// validates arguments, validates the resource, and assembles path + headers.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Replaces an existing user under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceUser: validates the resource, builds the Replace request
// against the user's self link, and issues it.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes a user under the session-token-reset retry policy.
// Consistency fix: annotated with @Override like every sibling user operation
// (createUser/upsertUser/replaceUser/readUser), all of which implement the client interface.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicyInstance),
        retryPolicyInstance);
}
// Core implementation of deleteUser: validates the link, builds the Delete request,
// and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Reads a single user under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readUser: validates the link, builds the Read request, and
// issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    // Read-feed over all users of the given database.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedPath);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}
// Runs a SQL query against the users of a database via the shared query pipeline.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    // Reads a single client encryption key under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readClientEncryptionKey: validates the link, builds the Read
// request, and issues it; synchronous failures become Mono.error.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    // Creates a client encryption key in a database under the session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds the Create request for a client encryption key and executes it.
 * Synchronous failures are converted into {@code Mono.error}.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates inputs and assembles an RxDocumentServiceRequest targeting the
 * client-encryption-key path segment of the given database.
 *
 * @throws IllegalArgumentException if the database link is empty or the key is null
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
/**
 * Replaces an existing client encryption key addressed by its name-based link,
 * retried via a session-token-reset policy.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
String nameBasedLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Replace request for a client encryption key.
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
// Let the retry policy capture the outgoing request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the feed of client encryption keys for a database via the
 * non-document read-feed path.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
String databaseLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
/** Queries client encryption keys within a database using the given SQL spec. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
String databaseLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission under the given user link.
 * <p>
 * Fix: the original created {@code documentClientRetryPolicy} for the internal call
 * but passed a SECOND, freshly-created policy instance to
 * {@code inlineIfPossibleAsObs}, so the policy driving retries was not the one the
 * request was recorded on. Every sibling method (upsert/replace/delete/read
 * permission, client-encryption-key CRUD) reuses a single instance for both roles;
 * this now does the same.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
// Reuse the same retry-policy instance for the operation and the retry driver.
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Builds and issues the Create request for a permission.
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Upserts a permission under the given user link with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Upsert request for a permission.
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
// Let the retry policy capture the outgoing request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Validates inputs and assembles an RxDocumentServiceRequest targeting the
 * permissions path segment of the given user.
 *
 * @throws IllegalArgumentException if the user link is empty or the permission is null
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
/** Replaces a permission (addressed via its self-link) with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Replace request for a permission using its self-link.
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
// Let the retry policy capture the outgoing request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Deletes a permission by link with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a permission.
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
// Let the retry policy capture the outgoing request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads a permission by link with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for a permission.
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
// Let the retry policy capture the outgoing request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of permissions under a user link. */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
/** Queries permissions with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
QueryFeedOperationState state) {
return queryPermissions(userLink, new SqlQuerySpec(query), state);
}
/** Queries permissions under a user link with the given SQL spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
/** Replaces an offer (throughput) with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Builds and issues the Replace request for an offer via its self-link.
 * Note: offers take no request headers/options (both null on the request).
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads an offer by link with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for an offer.
 * The null-header cast disambiguates the RxDocumentServiceRequest.create overload.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
// Let the retry policy capture the outgoing request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads the feed of all offers in the account. */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/** Convenience overload: extracts query options from the operation state. */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
QueryFeedOperationState state,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
/**
 * Runs a non-document ReadFeed with a session-token-reset retry policy wrapped
 * around the paginated internal implementation.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
retryPolicy);
}
/**
 * Paginated ReadFeed over non-document resources (offers, permissions, keys, ...).
 * Builds one request per page (threading the continuation token and page size
 * through headers) and maps each raw response into a typed FeedResponse page.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 signals "service default page size".
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
assert(resourceType != ResourceType.Document);
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
/** Queries offers with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
return queryOffers(new SqlQuerySpec(query), state);
}
/** Queries offers account-wide (no parent link) with the given SQL spec. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
/** Reads the database-account metadata with session-token-reset retries. */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
documentClientRetryPolicy);
}
/**
 * Builds and issues the DatabaseAccount Read request (empty resource path).
 * Synchronous failures become {@code Mono.error}.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Returns the session container (typed as Object for the interface contract). */
public Object getSession() {
return this.sessionContainer;
}
/** Replaces the session container; the argument must be a SessionContainer. */
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the collection metadata cache. */
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
/** Returns the partition-key-range cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
/** Returns the global endpoint manager. */
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
/** Builds a fresh AddressSelector over the client's address resolver and protocol. */
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
/**
 * Reads the database-account metadata from a specific endpoint (bypassing normal
 * endpoint resolution). On success also refreshes the cached multi-write flag from
 * the account's multi-write-locations capability.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Force the request to the caller-supplied endpoint.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * Routing summary (as implemented below): explicit gateway mode, offers,
 * client encryption keys, non-execute script operations, partition key ranges,
 * and partition-key deletes always go to the gateway; create/upsert/delete/replace/read
 * of "metadata" resources (database, user, collection, permission) go to the gateway;
 * cross-partition queries without a PK range identity or PK header go to the gateway;
 * everything else goes to the direct store model.
 *
 * @param request
 * @return RxStoreModel
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
if (request.useGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Resource types that are gateway-only regardless of operation
// (scripts only when not being executed).
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries/feeds over collection children without partition targeting
// cannot be served directly; route them to the gateway.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Shuts the client down exactly once: releases the endpoint manager, store client
 * factory, HTTP client, CPU monitor registration, and (if enabled) the throughput
 * control store. Subsequent calls only log a warning.
 */
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
// getAndSet guarantees the shutdown sequence runs at most once.
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
/** Returns the item deserializer used to materialize documents. */
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Registers a throughput control group. On first invocation lazily creates the
 * shared ThroughputControlStore and wires it into either the direct store model
 * or the gateway proxy, depending on connection mode.
 * Synchronized so concurrent first calls cannot race the lazy initialization.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
/** Delegates proactive connection warm-up and cache initialization to the store model. */
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
/** Returns the account's default consistency level as read by the gateway configuration reader. */
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 *
 * In DIRECT mode the injector is wired into both the store model and the address
 * resolver; the gateway proxy is always configured regardless of mode.
 *
 * @param injectorProvider the fault injector provider.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
/** Forwards "warm-up completed" bookkeeping to the store model. */
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
/** Forwards "warm-up started" bookkeeping to the store model. */
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
/** Returns the master key or resource token used to authenticate this client. */
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}, where the
 * partition key value is bound as the {@code @pkValue} parameter.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
PartitionKey partitionKey,
String partitionKeySelector) {
final String pkParamName = "@pkValue";
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
// Same text the old StringBuilder produced, assembled in one expression.
String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
List<SqlParameter> parameters = new ArrayList<>();
parameters.add(new SqlParameter(pkParamName, pkValue));
return new SqlQuerySpec(queryText, parameters);
}
/**
 * Returns the feed ranges (one per physical partition) for a collection.
 * Wraps the internal resolution in an InvalidPartitionException retry policy so a
 * stale name cache triggers a refresh-and-retry instead of failing.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink, forceRefresh),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection, fetches the full set of overlapping partition key
 * ranges, and converts them into feed ranges.
 */
private Mono<List<FeedRange>> getFeedRangesInternal(
RxDocumentServiceRequest request,
String collectionLink,
boolean forceRefresh) {
logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// Query all ranges (full key space) for this collection's resource id.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
forceRefresh,
null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts a partition-key-range list into feed ranges.
 * A null list means the cached collection mapping is stale: the name cache is
 * flagged for refresh and InvalidPartitionException is thrown so the outer retry
 * policy re-resolves and retries.
 */
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
if (partitionKeyRangeList == null) {
request.forceNameCacheRefresh = true;
throw new InvalidPartitionException();
}
List<FeedRange> feedRanges = new ArrayList<>();
partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}
/** Wraps a partition key range's EPK range as a feed range. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
return randomUuid(rnd.nextLong(), rnd.nextLong());
}
/**
 * Stamps the RFC 4122 version-4 / variant-2 bits onto the supplied random halves
 * and returns the resulting UUID.
 */
static UUID randomUuid(long msb, long lsb) {
// Clear the version nibble (bits 12-15 of msb) and set it to 0b0100 (version 4).
long versionedMsb = (msb & ~0x000000000000F000L) | 0x0000000000004000L;
// Clear the top two variant bits of lsb and set them to 0b10 (IETF variant).
long variantLsb = (lsb & ~0xC000000000000000L) | 0x8000000000000000L;
return new UUID(versionedMsb, variantLsb);
}
/**
 * Convenience overload: applies the availability (hedging) strategy using this
 * client itself as the diagnostics factory.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
/**
 * Applies a threshold-based availability strategy ("hedging") to a document point
 * operation. When at least two regions are applicable, the operation is started in
 * the primary region immediately and speculatively re-issued against each further
 * region after a staggered delay; the first NON-TRANSIENT result (success or
 * non-transient error) wins and all other in-flight attempts are cancelled.
 * Diagnostics from all attempts are merged back into the request options.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two applicable regions -> no hedging; run the plain operation.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First (primary) attempt: may retry across all regions; only wraps
// CosmosExceptions so transient failures still propagate for retry.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pin to this region by excluding all other applicable
// regions, and only treat NON-transient CosmosExceptions as results.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold + step * (attempt index - 1).
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First emitted (non-transient) value wins; losers are cancelled.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when every source failed;
// surface the first real CosmosException found among the causes.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
private static boolean isCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
return unwrappedException instanceof CosmosException;
}
private static boolean isNonTransientCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
if (!(unwrappedException instanceof CosmosException)) {
return false;
}
CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
return isNonTransientResultForHedging(
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode());
}
private List<String> getEffectiveExcludedRegionsForHedging(
List<String> initialExcludedRegions,
List<String> applicableRegions,
String currentRegion) {
List<String> effectiveExcludedRegions = new ArrayList<>();
if (initialExcludedRegions != null) {
effectiveExcludedRegions.addAll(initialExcludedRegions);
}
for (String applicableRegion: applicableRegions) {
if (!applicableRegion.equals(currentRegion)) {
effectiveExcludedRegions.add(applicableRegion);
}
}
return effectiveExcludedRegions;
}
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
return true;
}
if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT &&
subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
return true;
}
if (statusCode == HttpConstants.StatusCodes.BADREQUEST
|| statusCode == HttpConstants.StatusCodes.CONFLICT
|| statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
|| statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
|| statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
|| statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) {
return true;
}
if (statusCode == HttpConstants.StatusCodes.NOTFOUND
&& subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return true;
}
return false;
}
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
if (clientContextOverride != null) {
return clientContextOverride;
}
return this;
}
/**
* Returns the applicable endpoints ordered by preference list if any
* @param operationType - the operationT
* @return the applicable endpoints ordered by preference list if any
*/
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
if (orderedEffectiveEndpointsList == null) {
return EMPTY_ENDPOINT_LIST;
}
int i = 0;
while (i < orderedEffectiveEndpointsList.size()) {
if (orderedEffectiveEndpointsList.get(i) == null) {
orderedEffectiveEndpointsList.remove(i);
} else {
i++;
}
}
return orderedEffectiveEndpointsList;
}
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
private static class NonTransientPointOperationResult {
private final ResourceResponse<Document> response;
private final CosmosException exception;
public NonTransientPointOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
public NonTransientPointOperationResult(ResourceResponse<Document> response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public ResourceResponse<Document> getResponse() {
return this.response;
}
}
private static class NonTransientFeedOperationResult<T> {
private final T response;
private final CosmosException exception;
public NonTransientFeedOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
public NonTransientFeedOperationResult(T response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public T getResponse() {
return this.response;
}
}
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
private final AtomicBoolean isMerged = new AtomicBoolean(false);
private final DiagnosticsClientContext inner;
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
private final boolean shouldCaptureAllFeedDiagnostics;
private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
mostRecentlyCreatedDiagnostics.set(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
return this.mostRecentlyCreatedDiagnostics.get();
}
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
if (ctx == null) {
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} |
```suggestion ``` | void verifyOpenTelemetryVersion() {
String currentOTelVersion = otelResource.getAttribute(ResourceAttributes.TELEMETRY_SDK_VERSION);
System.out.println("currentOTelVersion = " + currentOTelVersion);
assertThat(OpenTelemetryVersionCheckRunner.STARTER_OTEL_VERSION)
.as(
"Dear developer, You may have updated the OpenTelemetry dependencies of spring-cloud-azure-starter-monitor without updating the OTel starter version declared in "
+ OpenTelemetryVersionCheckRunner.class
+ ".")
.isEqualTo(currentOTelVersion);
} | System.out.println("currentOTelVersion = " + currentOTelVersion); | void verifyOpenTelemetryVersion() {
String currentOTelVersion = otelResource.getAttribute(ResourceAttributes.TELEMETRY_SDK_VERSION);
assertThat(OpenTelemetryVersionCheckRunner.STARTER_OTEL_VERSION)
.as(
"Dear developer, You may have updated the OpenTelemetry dependencies of spring-cloud-azure-starter-monitor without updating the OTel starter version declared in "
+ OpenTelemetryVersionCheckRunner.class
+ ".")
.isEqualTo(currentOTelVersion);
} | class TestConfig {
@Bean
HttpPipeline httpPipeline() {
countDownLatch = new CountDownLatch(2);
customValidationPolicy = new CustomValidationPolicy(countDownLatch);
return getHttpPipeline(customValidationPolicy);
}
HttpPipeline getHttpPipeline(@Nullable HttpPipelinePolicy policy) {
return new HttpPipelineBuilder()
.httpClient(HttpClient.createDefault())
.policies(policy)
.build();
}
@Bean
@Primary
SelfDiagnosticsLevel testSelfDiagnosticsLevel() {
return SelfDiagnosticsLevel.DEBUG;
}
} | class TestConfig {
@Bean
HttpPipeline httpPipeline() {
countDownLatch = new CountDownLatch(2);
customValidationPolicy = new CustomValidationPolicy(countDownLatch);
return getHttpPipeline(customValidationPolicy);
}
HttpPipeline getHttpPipeline(@Nullable HttpPipelinePolicy policy) {
return new HttpPipelineBuilder()
.httpClient(HttpClient.createDefault())
.policies(policy)
.build();
}
@Bean
@Primary
SelfDiagnosticsLevel testSelfDiagnosticsLevel() {
return SelfDiagnosticsLevel.DEBUG;
}
} |
I think even for hierarchicalPartitionKeys, each path can also be nested partitionKey, so we might need to do the same parsing as line 285 - 298 | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
@kushagraThapar do we allow nested PK's in hierarchical PK's? If so we will need to figure out our approach. Nested PK's are defined like this: `@Container(partitionKeyPath = "/nestedEntitySample/nestedPartitionKey")` And Hierarchical PK's are defined like this: `Container(hierarchicalPartitionKeyPaths = {"/id", "/firstName", "/lastName"})` The only way we currently know what type of PK it is is from the name identifier, we don't determine it based on the structure of the text. With this there is no way for us to know if there is a nested key in a hierarchical key. Thoughts? | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
Hmm, @xinlian12 can you please explain with some example? | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
I do not know whether it is a valid scenario, I am wondering whether the following config is valid: Container(hierarchicalPartitionKeyPaths = {"/id", "/nestedEntity/firstName", "/nestedEntity/lastName"}) @kushagraThapar @trande4884 | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
Agreed, I think we should test it out - @trande4884 - can we test this out and see if it passes or fails. One way to test this would be first directly in the Java SDK and then in spring data cosmos. Given this is the last open comment, let's try to get some clarity on it and then we should be able to release this work :) | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
Unless we fundamentally change how we handle PK's that example is not going to work. We have no way of determining that a key that is just a string in the heirarchical key is a nested key, the only way we know if a key is nested is if it defined by the variable "partitionKeyPath" like the following: @Container(containerName = "nested-partition-key", partitionKeyPath = "/nestedEntity/nestedPartitionKey", ru = TestConstants.DEFAULT_MINIMUM_RU) If you look at CosmosEntityInformation in this PR you will see we don't dynamically identify what kind of type of PK it is, we either look for one established with the @PartitionKey tag (standard PK) or one to be setup with the identifier "partitionKeyPath" (nested PK) or "hierarchicalPartitionKeyPaths" (sub PK). If we want to fundamentally change how we do this to dynamically identify the PK's and the corresponding type I am not opposed but I believe it should be done outside of this PR especially as it would be a breaking change. | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
Makes sense @trande4884 - and we don't want to put a breaking change right now. So may be we will leave it as it is for now, and if its a limitation, let it be for at least the first couple of releases with sub-partitioning. | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | f = entity.getClass().getDeclaredField(path.substring(1)); | public Object getPartitionKeyFieldValue(T entity) {
if (partitionKeyField != null) {
return ReflectionUtils.getField(partitionKeyField, entity);
} else if (partitionKeyPath != null) {
List<String> parts = Arrays.stream(partitionKeyPath.split("/")).collect(Collectors.toList());
final Object[] currentObject = {entity};
parts.forEach(part -> {
if (!part.isEmpty()) {
Field f = null;
try {
f = currentObject[0].getClass().getDeclaredField(part);
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
currentObject[0] = ReflectionUtils.getField(f, currentObject[0]);
}
});
return currentObject[0];
} else if (hierarchicalPartitionKeyPaths != null && hierarchicalPartitionKeyPaths.length > 0) {
ArrayList<Object> pkValues = new ArrayList<>();
for (final String path : hierarchicalPartitionKeyPaths) {
Field f = null;
try {
f = entity.getClass().getDeclaredField(path.substring(1));
} catch (NoSuchFieldException e) {
throw new RuntimeException(e);
}
ReflectionUtils.makeAccessible(f);
pkValues.add(ReflectionUtils.getField(f, entity));
}
return pkValues;
} else {
return null;
}
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} | class of id type
*/
@SuppressWarnings("unchecked")
public Class<ID> getIdType() {
return (Class<ID>) id.getType();
} |
nit: `itemToQuery` is unused. | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
} | TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size())); | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
CosmosItemResponse<?> response = container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
}
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
}
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} |
nit: `itemToQuery` is unused. | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
} | TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size())); | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
CosmosItemResponse<?> response = container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
}
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
/**
 * Verifies the client-level end-to-end timeout policy end to end: against a dedicated
 * client with a 1s budget, point reads and queries succeed before fault injection, fail
 * with {@link OperationCancelledException} while the injected delay is active, and
 * succeed again once the per-request options explicitly disable the policy.
 *
 * Fix: the method previously carried two identical {@code @Test} annotations;
 * {@code @Test} is not repeatable, so the duplicate was removed.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosClientBuilder builder = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
        .credential(credential);
    try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
        String dbname = "db_" + UUID.randomUUID();
        String containerName = "container_" + UUID.randomUUID();
        CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
        cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
        cosmosAsyncClient.getDatabase(dbname)
            .createContainerIfNotExists(properties).block();
        CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
            .getContainer(containerName);
        TestObject obj = new TestObject(UUID.randomUUID().toString(),
            "name123",
            2,
            UUID.randomUUID().toString());
        container.createItem(obj).block();
        // Sanity check: the point read succeeds before any fault is injected.
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        // With the READ_ITEM delay injected, the client-level 1s policy cancels the read.
        injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
        verifyExpectError(cosmosItemResponseMono);
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        // Same pattern for queries: succeeds first, then cancelled once delayed.
        CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
            .verify();
        // Disabling the policy on the request options overrides the client default,
        // so the operations succeed even with the fault still registered.
        CosmosItemRequestOptions options = new CosmosItemRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        faultInjectionRule.disable();
        cosmosAsyncClient.getDatabase(dbname).delete().block();
    }
}
/**
 * Installs a RESPONSE_DELAY fault (1.5s, fired once) for the given operation type on the
 * container's DIRECT-mode channel and returns the rule so the caller can disable it.
 *
 * @param suppressServiceRequests optional; forwarded to the result builder when non-null.
 */
private FaultInjectionRule injectFailure(
    CosmosAsyncContainer container,
    FaultInjectionOperationType operationType,
    Boolean suppressServiceRequests) {
    // Delay a single matching request long enough to exceed the 1s end-to-end budget.
    FaultInjectionServerErrorResultBuilder resultBuilder = FaultInjectionResultBuilders
        .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
        .delay(Duration.ofMillis(1500))
        .times(1);
    if (suppressServiceRequests != null) {
        resultBuilder.suppressServiceRequests(suppressServiceRequests);
    }
    IFaultInjectionResult delayResult = resultBuilder.build();
    FaultInjectionCondition matchCondition = new FaultInjectionConditionBuilder()
        .operationType(operationType)
        .connectionType(FaultInjectionConnectionType.DIRECT)
        .build();
    FaultInjectionRule delayRule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
        .condition(matchCondition)
        .result(delayResult)
        .build();
    FaultInjectorProvider provider = (FaultInjectorProvider) container
        .getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
    provider.configureFaultInjectionRules(Arrays.asList(delayRule)).block();
    return delayRule;
}
/**
 * Builds a test document for the given id and partition key, with a random
 * bounded suffix shared by the {@code name} and {@code prop} fields.
 */
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
    final int suffix = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
    return new TestObject(documentId, "name" + suffix, suffix, partitionKey);
}
/**
 * Bulk-inserts {@code documentCount} generated documents into {@code container} and
 * returns the inserted documents. Partition keys are drawn at random from
 * {@code partitionKeys} when provided, otherwise each document gets a fresh random key.
 */
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
    List<TestObject> pending = new ArrayList<>(documentCount);
    for (int created = 0; created < documentCount; created++) {
        String id = UUID.randomUUID().toString();
        String pk = (partitionKeys == null)
            ? UUID.randomUUID().toString()
            : partitionKeys.get(random.nextInt(partitionKeys.size()));
        pending.add(getDocumentDefinition(id, pk));
    }
    List<TestObject> inserted = bulkInsertBlocking(container, pending);
    // Give secondaries a chance to catch up before tests start reading.
    waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
    return inserted;
}
/**
 * Simple document payload used throughout these tests. Fields stay package-private
 * because sibling tests read {@code id} and {@code mypk} directly; the accessors
 * exist for JSON (de)serialization.
 */
static class TestObject {
    String id;
    String name;
    int prop;
    String mypk;
    String constantProp = "constantProp";

    /** No-arg constructor required by the JSON deserializer. */
    public TestObject() {
    }

    public TestObject(String id, String name, int prop, String mypk) {
        this.id = id;
        this.name = name;
        this.prop = prop;
        this.mypk = mypk;
    }

    public String getId() {
        return id;
    }

    public void setId(String value) {
        this.id = value;
    }

    public String getName() {
        return name;
    }

    public void setName(String value) {
        this.name = value;
    }

    public int getProp() {
        return prop;
    }

    public void setProp(final int value) {
        this.prop = value;
    }

    public String getMypk() {
        return mypk;
    }

    public void setMypk(String value) {
        this.mypk = value;
    }

    public String getConstantProp() {
        return constantProp;
    }
}
} |
nit: The change can be removed altogether since `response` is unused. | public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
CosmosItemResponse<?> response = container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
} | CosmosItemResponse<?> response = container.createItem(obj).block(); | public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
/**
 * Verifies that a query fails with {@link OperationCancelledException} carrying
 * sub-status CLIENT_OPERATION_TIMEOUT when a 1s end-to-end latency policy is set on the
 * query request options and the injected response delay exceeds that budget.
 *
 * Fix: removed the unused local {@code itemToQuery} (a random pick whose result
 * was never used by the query).
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build();
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    // Delay QUERY_ITEM responses (1.5s) beyond the 1s end-to-end budget.
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * Verifies that a query fails with {@link OperationCancelledException} carrying
 * sub-status CLIENT_OPERATION_TIMEOUT when the 1s end-to-end latency policy is set at
 * the client level and the injected response delay exceeds that budget.
 *
 * Fix: removed the unused local {@code itemToQuery} (a random pick whose result
 * was never used by the query).
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build();
    // The timeout budget comes from the client; the request options carry no override.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
/**
 * Installs a RESPONSE_DELAY fault (1.5s, fired once) for the given operation type on the
 * container's DIRECT-mode channel and returns the rule so the caller can disable it.
 *
 * Fix: removed the stray {@code @Test} annotation — this is a private, parameterized
 * helper, not a test method, and the annotation would make TestNG try to run it.
 *
 * @param suppressServiceRequests optional; forwarded to the result builder when non-null.
 */
private FaultInjectionRule injectFailure(
    CosmosAsyncContainer container,
    FaultInjectionOperationType operationType,
    Boolean suppressServiceRequests) {
    FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
        .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
        .delay(Duration.ofMillis(1500))
        .times(1);
    if (suppressServiceRequests != null) {
        faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
    }
    IFaultInjectionResult result = faultInjectionResultBuilder.build();
    FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
        .operationType(operationType)
        .connectionType(FaultInjectionConnectionType.DIRECT)
        .build();
    FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
        .condition(condition)
        .result(result)
        .build();
    FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
        .getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
    injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
    return rule;
}
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
/**
 * Verifies that a query fails with {@link OperationCancelledException} carrying
 * sub-status CLIENT_OPERATION_TIMEOUT when a 1s end-to-end latency policy is set on the
 * query request options and the injected response delay exceeds that budget.
 *
 * Fix: removed the no-op statement {@code createdDocuments.get(random.nextInt(...))}
 * whose result was discarded.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build();
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    // Delay QUERY_ITEM responses (1.5s) beyond the 1s end-to-end budget.
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * Verifies that a query fails with {@link OperationCancelledException} carrying
 * sub-status CLIENT_OPERATION_TIMEOUT when the 1s end-to-end latency policy is set at
 * the client level and the injected response delay exceeds that budget.
 *
 * Fix: removed the no-op statement {@code createdDocuments.get(random.nextInt(...))}
 * whose result was discarded.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build();
    // The timeout budget comes from the client; the request options carry no override.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
// Verifies that when the "disable default E2E timeout for non-point operations" system
// property is set, a query survives an injected response delay instead of timing out.
// Ordering matters: the property must be set BEFORE the client is (re)initialized.
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
// Globally suppress the default end-to-end timeout for non-point operations.
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
// Re-create the client so it picks up the system property together with the 1s E2E policy.
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
// Inject the delay; with the timeout suppressed the query should still complete.
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
// Always undo global state (fault rule + system property) even if the assertion fails.
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
/**
 * Registers a one-shot RESPONSE_DELAY (1.5s) fault-injection rule for the given
 * operation type on the container's direct-mode transport and returns the enabled rule.
 * Callers must call {@code rule.disable()} when done.
 *
 * FIX: removed the stray {@code @Test(groups = {"fast"}, timeOut = 10000L)} annotation
 * that sat on this private parameterized helper - it is not a test method and must not
 * be picked up by the TestNG runner.
 *
 * @param suppressServiceRequests optional; only applied when non-null
 */
private FaultInjectionRule injectFailure(
    CosmosAsyncContainer container,
    FaultInjectionOperationType operationType,
    Boolean suppressServiceRequests) {
    FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
        .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
        .delay(Duration.ofMillis(1500))
        .times(1);
    if (suppressServiceRequests != null) {
        faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
    }
    IFaultInjectionResult result = faultInjectionResultBuilder.build();
    FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
        .operationType(operationType)
        .connectionType(FaultInjectionConnectionType.DIRECT)
        .build();
    FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
        .condition(condition)
        .result(result)
        .build();
    FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
        .getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
    injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
    return rule;
}
/** Builds a TestObject for the given id/partition key with a random name and prop value. */
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
    int suffix = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
    return new TestObject(documentId, "name" + suffix, suffix, partitionKey);
}
/**
 * Bulk-inserts {@code documentCount} random documents; partition keys are drawn from
 * {@code partitionKeys} when provided, otherwise each document gets a random one.
 */
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
    List<TestObject> documentsToInsert = new ArrayList<>(documentCount);
    for (int i = 0; i < documentCount; i++) {
        String pk = (partitionKeys == null)
            ? UUID.randomUUID().toString()
            : partitionKeys.get(random.nextInt(partitionKeys.size()));
        documentsToInsert.add(getDocumentDefinition(UUID.randomUUID().toString(), pk));
    }
    List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
    waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
    return documentInserted;
}
// Simple serializable test document. The field/getter names define the JSON wire shape
// (id, name, prop, mypk, constantProp) - presumably serialized via Jackson; do not
// rename without updating the queries that reference these properties.
static class TestObject {
String id;
String name;
int prop;
String mypk; // partition key value
String constantProp = "constantProp"; // fixed marker property present on every document
// No-args constructor required for deserialization.
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
}
Just curious - we need these changes because of the global copy of `CosmosClientBuilder`? | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
/**
 * For weak consistency levels (EVENTUAL / CONSISTENT_PREFIX) sleeps a fixed interval so
 * replicas can catch up before the test reads; no-op for stronger levels.
 */
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
    switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
        case EVENTUAL:
        case CONSISTENT_PREFIX:
            logger.info(" additional wait in EVENTUAL mode so the replica catch up");
            try {
                TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt flag instead of silently swallowing it.
                Thread.currentThread().interrupt();
                logger.error("unexpected failure", e);
            }
            // intentional fallthrough
        case SESSION:
        case BOUNDED_STALENESS:
        case STRONG:
        default:
            break;
    }
}
/**
 * Creates a container with manual throughput; for high-throughput (>6000 RU, i.e. likely
 * multi-partition) or multi-region accounts, pauses 3s so the topology settles.
 */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options, int throughput) {
    database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
    CosmosAsyncClient client = ImplementationBridgeHelpers
        .CosmosAsyncDatabaseHelper
        .getCosmosAsyncDatabaseAccessor()
        .getCosmosAsyncClient(database);
    boolean isMultiRegional = ImplementationBridgeHelpers
        .CosmosAsyncClientHelper
        .getCosmosAsyncClientAccessor()
        .getPreferredRegions(client).size() > 1;
    if (throughput > 6000 || isMultiRegional) {
        try {
            Thread.sleep(3000);
        } catch (InterruptedException e) {
            // FIX: preserve the thread's interrupt status before surfacing the failure.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    return database.getContainer(cosmosContainerProperties.getId());
}
/** Creates a container with the given options and returns a handle to it. */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options) {
    String containerId = cosmosContainerProperties.getId();
    database.createContainer(cosmosContainerProperties, options).block();
    return database.getContainer(containerId);
}
/**
 * Container definition with partition key /pk and four composite indexes: simple
 * (ASC/DESC pair), maximum columns, primitive value types, and strings of varying
 * length with default sort order.
 * NOTE(review): despite the method name, no spatial indexes are configured here - confirm
 * whether that is intentional. Removed the unused OBJECT_FIELD/ARRAY_FIELD locals.
 */
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<String>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    // Simple two-column composite index.
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        compositePathOf(NUMBER_FIELD, CompositePathSortOrder.ASCENDING),
        compositePathOf(STRING_FIELD, CompositePathSortOrder.DESCENDING))));
    // Composite index with the maximum column count exercised by the tests.
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        compositePathOf(NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        compositePathOf(STRING_FIELD, CompositePathSortOrder.ASCENDING),
        compositePathOf(NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING),
        compositePathOf(STRING_FIELD_2, CompositePathSortOrder.ASCENDING))));
    // Composite index across primitive value types (number/string/bool/null).
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        compositePathOf(NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        compositePathOf(STRING_FIELD, CompositePathSortOrder.ASCENDING),
        compositePathOf(BOOL_FIELD, CompositePathSortOrder.DESCENDING),
        compositePathOf(NULL_FIELD, CompositePathSortOrder.ASCENDING))));
    // Composite index over strings of different lengths, leaving the default sort order.
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        compositePathOf(STRING_FIELD, null),
        compositePathOf(SHORT_STRING_FIELD, null),
        compositePathOf(MEDIUM_STRING_FIELD, null),
        compositePathOf(LONG_STRING_FIELD, null))));
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

/** Builds a CompositePath for "/field"; pass a null order to keep the service default. */
private static CompositePath compositePathOf(String field, CompositePathSortOrder order) {
    CompositePath compositePath = new CompositePath();
    compositePath.setPath("/" + field);
    if (order != null) {
        compositePath.setOrder(order);
    }
    return compositePath;
}
/** Creates a container in the named database and returns a handle to it. */
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
    CosmosAsyncDatabase targetDatabase = client.getDatabase(dbId);
    targetDatabase.createContainer(collectionDefinition).block();
    return targetDatabase.getContainer(collectionDefinition.getId());
}
/** Deletes the named container from the named database, blocking until done. */
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
    CosmosAsyncContainer container = client.getDatabase(dbId).getContainer(collectionId);
    container.delete().block();
}
/** Creates the given item with an effectively-unbounded (1h) E2E timeout and returns its properties. */
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
    CosmosEndToEndOperationLatencyPolicyConfig relaxedTimeout =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build();
    CosmosItemRequestOptions options = new CosmosItemRequestOptions()
        .setCosmosEndToEndOperationLatencyPolicyConfig(relaxedTimeout);
    return BridgeInternal.getProperties(cosmosContainer.createItem(item, options).block());
}
/** Creates all documents concurrently (up to {@code concurrencyLevel} in flight) with a relaxed 1h E2E timeout. */
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                                  List<T> documentDefinitionList,
                                                  int concurrencyLevel) {
    CosmosItemRequestOptions options = new CosmosItemRequestOptions()
        .setCosmosEndToEndOperationLatencyPolicyConfig(
            new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
    List<Mono<CosmosItemResponse<T>>> createMonos = new ArrayList<>(documentDefinitionList.size());
    documentDefinitionList.forEach(docDef -> createMonos.add(cosmosContainer.createItem(docDef, options)));
    return Flux.merge(Flux.fromIterable(createMonos), concurrencyLevel);
}
/** Bulk-inserts with the default concurrency level and blocks for the inserted items. */
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
                                      List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}
/** Bulk-inserts and blocks for completion, discarding the responses. */
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .then()
        .block();
}
/** Creates a user in the named database and returns a handle to it. */
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    String createdUserId = database.createUser(userSettings).block().getProperties().getId();
    return database.getUser(createdUserId);
}
/** Deletes any pre-existing user with the same id, then creates it fresh. */
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
    deleteUserIfExists(client, databaseId, user.getId());
    return createUser(client, databaseId, user);
}
/** Deletes any pre-existing container with the same id, then creates it fresh. */
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
    deleteCollectionIfExists(client, databaseId, collection.getId());
    return createCollection(client.getDatabase(databaseId), collection, options);
}
/** Standard container definition plus an all-versions-and-deletes change-feed policy (5 min retention). */
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
    CosmosContainerProperties properties = getCollectionDefinition(UUID.randomUUID().toString());
    ChangeFeedPolicy fullFidelityPolicy = ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5));
    properties.setChangeFeedPolicy(fullFidelityPolicy);
    return properties;
}
/** Standard container definition with a random id and /mypk partition key. */
static protected CosmosContainerProperties getCollectionDefinition() {
    String randomId = UUID.randomUUID().toString();
    return getCollectionDefinition(randomId);
}
/** Container definition with the given id and a single /mypk partition key path. */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition();
    ArrayList<String> pkPaths = new ArrayList<>();
    pkPaths.add("/mypk");
    pkDefinition.setPaths(pkPaths);
    return new CosmosContainerProperties(collectionId, pkDefinition);
}
/** Container definition with the given id and an explicit partition key definition. */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
    CosmosContainerProperties definition = new CosmosContainerProperties(collectionId, partitionKeyDefinition);
    return definition;
}
/** Container definition with /mypk partition key using hash V2 (large partition key support). */
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition();
    ArrayList<String> pkPaths = new ArrayList<>();
    pkPaths.add("/mypk");
    pkDefinition.setPaths(pkPaths);
    pkDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    return new CosmosContainerProperties(collectionId, pkDefinition);
}
/** Range-indexed container definition partitioned on /id. */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
    List<String> idPath = Collections.singletonList("/id");
    return getCollectionDefinitionWithRangeRangeIndex(idPath);
}
/** Range-indexed container definition partitioned on /mypk. */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
    List<String> mypkPath = Collections.singletonList("/mypk");
    return getCollectionDefinitionWithRangeRangeIndex(mypkPath);
}
/** Container definition with the given partition key path(s) and an include-everything ("/*") indexing policy. */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition();
    pkDefinition.setPaths(partitionKeyPath);
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    List<IncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(new IncludedPath("/*"));
    indexingPolicy.setIncludedPaths(includedPaths);
    CosmosContainerProperties properties = new CosmosContainerProperties(UUID.randomUUID().toString(), pkDefinition);
    properties.setIndexingPolicy(indexingPolicy);
    return properties;
}
/** Deletes the named container only when a query confirms it currently exists. */
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    database.read().block();
    String existsQuery = String.format("SELECT * FROM root r where r.id = '%s'", collectionId);
    List<CosmosContainerProperties> matches = database.queryContainers(existsQuery, null)
        .collectList()
        .block();
    if (!matches.isEmpty()) {
        deleteCollection(database, collectionId);
    }
}
/** Deletes the named container from the given database, blocking until done. */
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
    CosmosAsyncContainer container = cosmosDatabase.getContainer(collectionId);
    container.delete().block();
}
/** Deletes the given container, blocking until done. */
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
    cosmosContainer
        .delete()
        .block();
}
/** Deletes the document only when a partition-scoped query confirms it currently exists. */
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setPartitionKey(new PartitionKey(docId));
    CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
    String existsQuery = String.format("SELECT * FROM root r where r.id = '%s'", docId);
    List<InternalObjectNode> matches = cosmosContainer
        .queryItems(existsQuery, options, InternalObjectNode.class)
        .byPage()
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .collectList()
        .block();
    if (!matches.isEmpty()) {
        deleteDocument(cosmosContainer, docId);
    }
}
/** Best-effort document delete: a 404 (already gone) is ignored, any other failure is rethrown. */
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer == null || documentId == null) {
        return;
    }
    try {
        CosmosItemRequestOptions options = new CosmosItemRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
        cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey), options).block();
    } catch (Exception e) {
        CosmosException cosmosException = Utils.as(e, CosmosException.class);
        if (cosmosException == null || cosmosException.getStatusCode() != 404) {
            throw e;
        }
        // 404: the document is already gone - nothing to do.
    }
}
/** Deletes the document (PartitionKey.NONE) with a relaxed 1h E2E timeout, blocking until done. */
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    CosmosEndToEndOperationLatencyPolicyConfig relaxedTimeout =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build();
    CosmosItemRequestOptions options = new CosmosItemRequestOptions()
        .setCosmosEndToEndOperationLatencyPolicyConfig(relaxedTimeout);
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE, options).block();
}
/** Deletes the named user only when a query confirms it currently exists. */
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    client.getDatabase(databaseId).read().block();
    String existsQuery = String.format("SELECT * FROM root r where r.id = '%s'", userId);
    List<CosmosUserProperties> matches = database
        .queryUsers(existsQuery, null)
        .collectList()
        .block();
    if (!matches.isEmpty()) {
        deleteUser(database, userId);
    }
}
/** Deletes the named user from the given database, blocking until done. */
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    CosmosAsyncUser user = database.getUser(userId);
    user.delete().block();
}
/** Drops any pre-existing database with the same id, then creates it fresh. */
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    String databaseId = databaseSettings.getId();
    safeDeleteDatabase(client.getDatabase(databaseId));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseId);
}
/** Creates a database with the given id and returns a handle to it. */
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    client.createDatabase(new CosmosDatabaseProperties(databaseId)).block();
    return client.getDatabase(databaseId);
}
/**
 * Creates a database via the sync client; returns null when the create fails.
 * FIX: log through the class logger instead of {@code e.printStackTrace()} so the
 * failure shows up in the test output.
 */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        logger.error("failed to create sync database {}", databaseId, e);
    }
    return null;
}
/**
 * Returns the existing database with the given id (verified via read), creating it first
 * when the existence query finds nothing.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
        .collectList()
        .block();
    if (!res.isEmpty()) { // idiomatic emptiness check (was res.size() != 0)
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        database.read().block();
        return database;
    }
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/**
 * Best-effort database delete: failures are logged and swallowed (the database may not
 * exist). FIX: previously the exception was silently discarded in an empty catch.
 */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            logger.error("failed to delete database", e);
        }
    }
}
/** Best-effort sync database delete: failures are logged and swallowed. */
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database == null) {
        return;
    }
    try {
        logger.info("attempting to delete database ....");
        database.delete();
        logger.info("database deletion completed");
    } catch (Exception e) {
        logger.error("failed to delete sync database", e);
    }
}
/** Deletes every container in the database (no-op when database is null). */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    List<CosmosContainerProperties> collections = database.readAllContainers()
        .collectList()
        .block();
    for (CosmosContainerProperties collection : collections) {
        database.getContainer(collection.getId()).delete().block();
    }
}
/**
 * Best-effort container delete: 404 (already gone) is logged at info, other failures at
 * error; always pauses 100ms afterwards so the backend can settle.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection == null) {
        return;
    }
    try {
        logger.info("attempting to delete container {}.{}....",
            collection.getDatabase().getId(),
            collection.getId());
        collection.delete().block();
        logger.info("Container {}.{} deletion completed",
            collection.getDatabase().getId(),
            collection.getId());
    } catch (Exception e) {
        boolean shouldLogAsError = true;
        if (e instanceof CosmosException) {
            CosmosException cosmosException = (CosmosException) e;
            if (cosmosException.getStatusCode() == 404) {
                shouldLogAsError = false;
                logger.info(
                    "Container {}.{} does not exist anymore.",
                    collection.getDatabase().getId(),
                    collection.getId());
            }
        }
        if (shouldLogAsError) {
            logger.error("failed to delete sync container {}.{}",
                collection.getDatabase().getId(),
                collection.getId(),
                e);
        }
    } finally {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // FIX: restore the thread's interrupt status before surfacing the failure.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}
/**
 * Best-effort delete of a container by id. FIX: failures are now logged instead of
 * being silently discarded in an empty catch.
 */
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database != null && collectionId != null) {
        try {
            safeDeleteCollection(database.getContainer(collectionId));
        } catch (Exception e) {
            logger.error("failed to delete collection {}", collectionId, e);
        }
    }
}
/** Closes the client on a freshly spawned thread so the caller is never blocked. */
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    Runnable closeTask = () -> {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    };
    new Thread(closeTask).start();
}
/** Closes the async client in place; failures are logged, never thrown. */
static protected void safeClose(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    try {
        client.close();
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/** Closes the sync client; failures are logged, never thrown. */
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client == null) {
        return;
    }
    try {
        logger.info("closing client ...");
        client.close();
        logger.info("closing client completed");
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/** Validates a Mono response using the default validation timeout. */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
    this.validateSuccess(single, validator, subscriberValidationTimeout);
}
/** Validates a Mono response by adapting it to the Flux-based overload. */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
    validateSuccess(single.flux(), validator, timeout);
}
/** Subscribes, waits for termination, and asserts exactly one value passing the validator. */
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
                                                              CosmosResponseValidator<T> validator, long timeout) {
    TestSubscriber<T> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
/** Validates an expected failure on a Mono using the default validation timeout. */
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
    throws InterruptedException {
    validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
/** Subscribes, waits for termination, and asserts exactly one error passing the validator. */
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
                                                                                  FailureValidator validator, long timeout) throws InterruptedException {
    TestSubscriber<CosmosResponse> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.errors()).hasSize(1);
    validator.validate((Throwable) subscriber.getEvents().get(1).get(0));
}
/** Subscribes to an item response and asserts a single successful value passing the validator. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
    Mono<T> responseMono, CosmosItemResponseValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
/** Subscribes to an item response and asserts a single error passing the validator. */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
    Mono<T> responseMono, FailureValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNotComplete();
    subscriber.assertTerminated();
    assertThat(subscriber.errors()).hasSize(1);
    validator.validate((Throwable) subscriber.getEvents().get(1).get(0));
}
/** Validates a query's feed responses using the default validation timeout. */
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                     FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}
/** Subscribes, waits for completion, and validates the full list of feed responses. */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    validator.validate(subscriber.values());
}
/** Drains the query via continuation tokens once per page size and validates each run. */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        List<FeedResponse<T>> pages = queryWithContinuationTokens(query, container, pageSize, classType);
        validator.validate(pages);
    }
}
/**
 * Drains the query one page at a time by re-issuing it with each page's continuation
 * token, returning every page. FIX: removed the {@code continuationTokens} list that was
 * populated but never read.
 */
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    String requestContinuation = null;
    List<FeedResponse<T>> responseList = new ArrayList<>();
    do {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors();
        testSubscriber.assertComplete();
        @SuppressWarnings("unchecked")
        FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
        requestContinuation = firstPage.getContinuationToken();
        responseList.add(firstPage);
    } while (requestContinuation != null);
    return responseList;
}
/** Validates an expected query failure using the default validation timeout. */
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
    validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
FailureValidator validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
// Default provider: a single gateway-mode SESSION-consistency client builder.
@DataProvider
public static Object[][] clientBuilders() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
// Gateway-only provider; currently identical to clientBuilders().
@DataProvider
public static Object[][] clientBuildersWithGateway() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
// SESSION-consistency clients over both direct-TCP and gateway transport.
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
};
}
// SESSION-consistency client over direct-TCP only (no gateway variant).
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
};
}
/**
 * Converts an UpperCamelCase consistency name (e.g. "BoundedStaleness") into the
 * matching {@link ConsistencyLevel}; logs and fails fast when the value is absent.
 */
static ConsistencyLevel parseConsistency(String consistency) {
    if (consistency == null) {
        logger.error("INVALID configured test consistency [{}].", consistency);
        throw new IllegalStateException("INVALID configured test consistency " + consistency);
    }
    String normalized = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
    return ConsistencyLevel.valueOf(normalized);
}
/**
 * Parses the configured preferred-locations JSON array into a list of region names.
 * Returns null for a blank configuration; fails fast on malformed JSON.
 */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    TypeReference<List<String>> listOfStrings = new TypeReference<List<String>>() {
    };
    try {
        return objectMapper.readValue(preferredLocations, listOfStrings);
    } catch (Exception parseFailure) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations);
    }
}
/**
 * Parses the configured protocols JSON array (UpperCamelCase names) into
 * {@link Protocol} values. Returns null for a blank configuration; fails fast on
 * malformed JSON or unknown protocol names.
 */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    try {
        List<String> names = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
        });
        List<Protocol> parsed = new ArrayList<>(names.size());
        for (String name : names) {
            parsed.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, name)));
        }
        return parsed;
    } catch (Exception parseFailure) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        throw new IllegalStateException("INVALID configured test protocols " + protocols);
    }
}
// Direct-mode builders over every configured protocol, plus a gateway builder.
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
// Direct-mode over HTTPS only.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
// Direct-mode over TCP only (gateway included).
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
// Direct-mode over TCP only, without the gateway variant.
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
// NOTE(review): despite the name, this passes contentResponseOnWriteEnabled=true and
// excludes the gateway — identical to simpleClientBuildersWithJustDirectTcp(). Confirm intent.
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
// Direct-TCP and gateway builders with throttling retries disabled (0 retry attempts).
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
return new Object[][]{
{ createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
{ createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
};
}
// Convenience overload: gateway included and throttling retries enabled.
private static Object[][] simpleClientBuildersWithDirect(
boolean contentResponseOnWriteEnabled,
Protocol... protocols) {
return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
/**
 * Builds one direct-mode client builder per protocol at EVENTUAL consistency,
 * optionally appends a gateway-mode SESSION builder, and wraps each builder into a
 * single-element Object[] row for TestNG.
 */
private static Object[][] simpleClientBuildersWithDirect(
boolean includeGateway,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
// "Simple" providers deliberately restrict direct-mode clients to EVENTUAL consistency.
List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
// Multi-master only makes sense with preferred regions configured and a SESSION account.
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
// Log the configuration of each builder for test-run diagnostics.
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
if (includeGateway) {
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
false,
null,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
}
return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
// Direct-mode builders across all desired consistencies and configured protocols.
@DataProvider
public static Object[][] clientBuildersWithDirect() {
return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
// All consistencies over HTTPS only.
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
// All consistencies over TCP only.
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
// All consistencies over TCP with contentResponseOnWrite disabled.
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
/**
 * Concatenates the direct-session builder sets with contentResponseOnWrite disabled
 * and enabled into a single TestNG provider.
 */
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
    Object[][] disabled = clientBuildersWithDirectSession(false, true, Protocol.TCP);
    Object[][] enabled = clientBuildersWithDirectSession(true, true, Protocol.TCP);
    // System.arraycopy replaces the original hand-rolled dual-index copy loops.
    Object[][] combined = new Object[disabled.length + enabled.length][];
    System.arraycopy(disabled, 0, combined, 0, disabled.length);
    System.arraycopy(enabled, 0, combined, disabled.length, enabled.length);
    return combined;
}
// Direct-mode SESSION builders over every configured protocol, plus a gateway builder.
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
/**
 * Same as clientBuildersWithDirectSession() but appends one extra gateway builder that
 * targets the compute-gateway emulator port instead of the routing gateway.
 */
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
Object[][] originalProviders = clientBuildersWithDirectSession(
true,
true,
toArray(protocols));
List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
Object[] injectedProviderParameters = new Object[1];
// Rewrite the emulator host to hit the compute gateway endpoint.
CosmosClientBuilder builder = createGatewayRxDocumentClient(
TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
ConsistencyLevel.SESSION,
false,
null,
true,
true);
injectedProviderParameters[0] = builder;
providers.add(injectedProviderParameters);
Object[][] array = new Object[providers.size()][];
return providers.toArray(array);
}
// SESSION builders over TCP only.
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
// No protocols passed, so only the gateway-mode SESSION builder is produced.
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
return clientBuildersWithDirectSession(true, true);
}
// Converts the protocol list to an array. A zero-length seed array is the preferred
// idiom: the JVM allocates the correctly-sized result internally.
static Protocol[] toArray(List<Protocol> protocols) {
    return protocols.toArray(new Protocol[0]);
}
// Direct-mode builders restricted to SESSION consistency.
// Collections.singletonList replaces the double-brace initialization anti-pattern,
// which created a needless anonymous ArrayList subclass; the callee only iterates
// the list, so an immutable singleton is safe.
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    return clientBuildersWithDirect(
        Collections.singletonList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}
// Direct-mode builders across the globally configured desiredConsistencies list.
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/**
 * Parses the configured desired-consistencies JSON array (UpperCamelCase names) into
 * {@link ConsistencyLevel} values. Returns null for a blank configuration; fails fast
 * on malformed JSON or unknown names.
 */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    try {
        List<String> names = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        List<ConsistencyLevel> levels = new ArrayList<>(names.size());
        for (String name : names) {
            levels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, name)));
        }
        return levels;
    } catch (Exception parseFailure) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies);
    }
}
/**
 * Returns the account consistency level plus every weaker level, strongest first.
 * The switch deliberately falls through: entering at a strong level accumulates all
 * levels below it (e.g. STRONG yields all five, SESSION yields the last three).
 */
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
/**
 * Builds one direct-mode client builder per (protocol, consistency) pair, always
 * appends a gateway-mode SESSION builder, logs each configuration, and wraps each
 * builder into a single-element Object[] row for TestNG.
 */
private static Object[][] clientBuildersWithDirect(
List<ConsistencyLevel> testConsistencies,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
// Multi-master only makes sense with preferred regions configured and a SESSION account.
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
// Log the configuration of each builder for test-run diagnostics.
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
// Unlike the "simple" variant, the gateway builder is always included here.
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
/**
 * Gateway-mode builder for housekeeping (setup/teardown) operations: SESSION
 * consistency with a throttling-retry wait long enough to cover suite setup.
 */
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
ThrottlingRetryOptions options = new ThrottlingRetryOptions();
// Allow retries to wait as long as the whole suite-setup timeout.
options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.throttlingRetryOptions(options)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(ConsistencyLevel.SESSION);
}
// Gateway-mode builder against the default test host.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
return createGatewayRxDocumentClient(
TestConfigurations.HOST,
consistencyLevel,
multiMasterEnabled,
preferredRegions,
contentResponseOnWriteEnabled,
retryOnThrottledRequests);
}
/**
 * Builds a fully-configured gateway-mode client builder against the given endpoint.
 * Throttling retries are disabled (0 attempts) when {@code retryOnThrottledRequests}
 * is false.
 */
static protected CosmosClientBuilder createGatewayRxDocumentClient(
String endpoint,
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.multipleWriteRegionsEnabled(multiMasterEnabled)
.preferredRegions(preferredRegions)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
// Force connection-policy materialization via the internal bridge accessor.
ImplementationBridgeHelpers
.CosmosClientBuilderHelper
.getCosmosClientBuilderAccessor()
.buildConnectionPolicy(builder);
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
return builder;
}
// Default gateway builder: SESSION consistency, single-master, retries enabled.
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
/**
 * Builds a direct-mode client builder against the default test host. The requested
 * protocol is injected through a spied {@link Configs} instance because the public
 * builder API does not expose protocol selection.
 */
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
Protocol protocol,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.directMode(DirectConnectionConfig.getDefaultConfig())
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
if (preferredRegions != null) {
builder.preferredRegions(preferredRegions);
}
// Multi-master writes are only enabled for SESSION-consistency clients.
if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
builder.multipleWriteRegionsEnabled(true);
}
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
// Override the protocol reported by Configs via a Mockito spy.
Configs configs = spy(new Configs());
doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
return injectConfigs(builder, configs);
}
/** Ceiling of totalExpectedResult / maxPageSize, with a floor of one page. */
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
    int fullAndPartialPages = (totalExpectedResult + maxPageSize - 1) / maxPageSize;
    return fullAndPartialPages < 1 ? 1 : fullAndPartialPages;
}
/** Exercises query metrics explicitly on, explicitly off, and unset (null). */
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
    Object[] metricsEnabled = {true};
    Object[] metricsDisabled = {false};
    Object[] metricsUnset = {null};
    return new Object[][]{metricsEnabled, metricsDisabled, metricsUnset};
}
/** ORDER BY queries paired with a flag indicating whether the sort key is c.id. */
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
    Object[] orderById = {"SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true};
    Object[] orderByTimestamp = {"SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false};
    return new Object[][]{orderById, orderByTimestamp};
}
// Deep-clones a client builder via the internal bridge so tests can derive variants
// without mutating the shared original.
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
/**
 * Decodes a hex string (two characters per byte, e.g. "0a1f") into its raw bytes.
 *
 * @param string hex-encoded input; must have even length
 * @return the decoded bytes (empty array for an empty input)
 * @throws IllegalArgumentException if the input length is odd
 * @throws NumberFormatException if a character pair is not valid hexadecimal
 */
public byte[] decodeHexString(String string) {
    if (string.length() % 2 != 0) {
        // Fail with a clear message instead of the cryptic StringIndexOutOfBoundsException
        // the unchecked substring(i, i + 2) produced for odd-length input.
        throw new IllegalArgumentException("Hex string must have an even length: " + string);
    }
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    for (int i = 0; i < string.length(); i += 2) {
        outputStream.write(Integer.parseInt(string.substring(i, i + 2), 16));
    }
    return outputStream.toByteArray();
}
} | options.setCosmosEndToEndOperationLatencyPolicyConfig( | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
/**
 * Sleeps briefly for weak-consistency clients (EVENTUAL / CONSISTENT_PREFIX) so
 * replicas can catch up before the test reads; stronger levels need no wait.
 * The fallthrough into the no-op cases is deliberate.
 */
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
case EVENTUAL:
case CONSISTENT_PREFIX:
logger.info(" additional wait in EVENTUAL mode so the replica catch up");
try {
TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
} catch (Exception e) {
// Best-effort wait: an interrupted sleep is logged, not propagated.
logger.error("unexpected failure", e);
}
case SESSION:
case BOUNDED_STALENESS:
case STRONG:
default:
break;
}
}
/**
 * Creates a container with manual throughput and returns a handle to it. For
 * high-throughput (multi-partition) or multi-region accounts, waits briefly so the
 * new container's metadata can propagate before use.
 */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
    CosmosContainerRequestOptions options, int throughput) {
    database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
    CosmosAsyncClient client = ImplementationBridgeHelpers
        .CosmosAsyncDatabaseHelper
        .getCosmosAsyncDatabaseAccessor()
        .getCosmosAsyncClient(database);
    boolean isMultiRegional = ImplementationBridgeHelpers
        .CosmosAsyncClientHelper
        .getCosmosAsyncClientAccessor()
        .getPreferredRegions(client).size() > 1;
    if (throughput > 6000 || isMultiRegional) {
        try {
            Thread.sleep(3000);
        } catch (InterruptedException e) {
            // Restore the interrupt status before rethrowing so callers and executors
            // can still observe the interruption (the original swallowed it).
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    return database.getContainer(cosmosContainerProperties.getId());
}
// Creates a container with default throughput and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
CosmosContainerRequestOptions options) {
database.createContainer(cosmosContainerProperties, options).block();
return database.getContainer(cosmosContainerProperties.getId());
}
/**
 * Builds a container definition (partition key "/pk") whose indexing policy carries
 * four composite indexes: a simple two-column index, a four-column index, a
 * primitive-values index, and a long-strings index with default sort order.
 *
 * NOTE(review): despite the method name, no spatial indexes are configured here —
 * confirm whether that is intentional. The unused OBJECT_FIELD/ARRAY_FIELD constants
 * from the original were removed.
 */
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";

    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<String>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);

    // Two columns with mixed sort orders.
    ArrayList<CompositePath> compositeIndexSimple = new ArrayList<>(Arrays.asList(
        makeCompositePath(NUMBER_FIELD, CompositePathSortOrder.ASCENDING),
        makeCompositePath(STRING_FIELD, CompositePathSortOrder.DESCENDING)));

    // Four columns — the maximum column count exercised by the tests.
    ArrayList<CompositePath> compositeIndexMaxColumns = new ArrayList<>(Arrays.asList(
        makeCompositePath(NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        makeCompositePath(STRING_FIELD, CompositePathSortOrder.ASCENDING),
        makeCompositePath(NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING),
        makeCompositePath(STRING_FIELD_2, CompositePathSortOrder.ASCENDING)));

    // Number, string, boolean and null primitive values.
    ArrayList<CompositePath> compositeIndexPrimitiveValues = new ArrayList<>(Arrays.asList(
        makeCompositePath(NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        makeCompositePath(STRING_FIELD, CompositePathSortOrder.ASCENDING),
        makeCompositePath(BOOL_FIELD, CompositePathSortOrder.DESCENDING),
        makeCompositePath(NULL_FIELD, CompositePathSortOrder.ASCENDING)));

    // String fields of increasing length; sort order left at its default.
    ArrayList<CompositePath> compositeIndexLongStrings = new ArrayList<>(Arrays.asList(
        makeCompositePath(STRING_FIELD, null),
        makeCompositePath(SHORT_STRING_FIELD, null),
        makeCompositePath(MEDIUM_STRING_FIELD, null),
        makeCompositePath(LONG_STRING_FIELD, null)));

    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    compositeIndexes.add(compositeIndexSimple);
    compositeIndexes.add(compositeIndexMaxColumns);
    compositeIndexes.add(compositeIndexPrimitiveValues);
    compositeIndexes.add(compositeIndexLongStrings);

    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

// Builds a composite-index path entry for "/<field>"; a null order keeps the default.
private static CompositePath makeCompositePath(String field, CompositePathSortOrder order) {
    CompositePath path = new CompositePath();
    path.setPath("/" + field);
    if (order != null) {
        path.setOrder(order);
    }
    return path;
}
// Creates a container in the named database and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
CosmosAsyncDatabase database = client.getDatabase(dbId);
database.createContainer(collectionDefinition).block();
return database.getContainer(collectionDefinition.getId());
}
// Deletes the named container; fails if it does not exist.
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
client.getDatabase(dbId).getContainer(collectionId).delete().block();
}
// Creates a single document with a very long end-to-end timeout and returns its
// server-side properties.
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
return BridgeInternal.getProperties(cosmosContainer.createItem(item, options).block());
}
/**
 * Inserts the given documents with bounded concurrency and emits each create
 * response. Insertion starts lazily when the returned flux is subscribed.
 *
 * @param concurrencyLevel maximum number of in-flight create operations
 */
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
List<T> documentDefinitionList,
int concurrencyLevel) {
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
List<Mono<CosmosItemResponse<T>>> result =
new ArrayList<>(documentDefinitionList.size());
for (T docDef : documentDefinitionList) {
result.add(cosmosContainer.createItem(docDef, options));
}
// Flux.merge subscribes to at most concurrencyLevel create Monos at a time.
return Flux.merge(Flux.fromIterable(result), concurrencyLevel);
}
/**
 * Inserts the documents with the default concurrency level, blocks until done, and
 * returns the created items.
 */
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
    List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}
/**
 * Inserts the documents with the default concurrency level and blocks until done,
 * discarding the responses.
 */
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    Flux<?> inserts = bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel());
    inserts.then().block();
}
// Creates a user in the named database and returns a handle to it.
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
CosmosUserResponse userResponse = database.createUser(userSettings).block();
return database.getUser(userResponse.getProperties().getId());
}
// Deletes any pre-existing user with the same id, then creates it fresh.
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
deleteUserIfExists(client, databaseId, user.getId());
return createUser(client, databaseId, user);
}
// Deletes any pre-existing container with the same id, then creates it fresh.
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
deleteCollectionIfExists(client, databaseId, collection.getId());
return createCollection(client.getDatabase(databaseId), collection, options);
}
// Container definition with the all-versions-and-deletes (full fidelity) change feed
// policy and a 5-minute retention window.
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
return cosmosContainerProperties;
}
// Container definition with a random id and partition key "/mypk".
static protected CosmosContainerProperties getCollectionDefinition() {
return getCollectionDefinition(UUID.randomUUID().toString());
}
// Container definition with the given id and partition key "/mypk".
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
// Container definition with the given id and a caller-supplied partition key definition.
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Same as getCollectionDefinition(String) but with partition-key hashing version V2.
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
partitionKeyDef.setVersion(PartitionKeyDefinitionVersion.V2);
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(collectionId, partitionKeyDef);
return collectionDefinition;
}
// Range-indexed definition partitioned on "/id".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}
// Range-indexed definition partitioned on "/mypk".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}
/**
 * Container definition with a random id, the given partition-key path(s), and an
 * indexing policy that includes every document path ("/*").
 */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPath);

    // Index every path in the document.
    List<IncludedPath> pathsToIndex = new ArrayList<>();
    pathsToIndex.add(new IncludedPath("/*"));
    IndexingPolicy policy = new IndexingPolicy();
    policy.setIncludedPaths(pathsToIndex);

    CosmosContainerProperties containerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
    containerProperties.setIndexingPolicy(policy);
    return containerProperties;
}
// Deletes the named container only if a query by id finds it; no-op otherwise.
// Test-only SQL built from internally generated ids (UUIDs), not untrusted input.
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
database.read().block();
List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
.collectList()
.block();
if (!res.isEmpty()) {
deleteCollection(database, collectionId);
}
}
// Deletes the named container from the given database; fails if it does not exist.
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
cosmosDatabase.getContainer(collectionId).delete().block();
}
// Deletes the given container; fails if it does not exist.
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
cosmosContainer.delete().block();
}
// Deletes the document only if a query by id finds it; no-op otherwise.
// NOTE(review): the doc id is used as the partition-key value here — this only works
// for containers partitioned on id (or documents whose PK equals their id); confirm.
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(docId));
CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
List<InternalObjectNode> res = cosmosContainer
.queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
.byPage()
.flatMap(page -> Flux.fromIterable(page.getResults()))
.collectList().block();
if (!res.isEmpty()) {
deleteDocument(cosmosContainer, docId);
}
}
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
if (cosmosContainer != null && documentId != null) {
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey), options).block();
} catch (Exception e) {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null || dce.getStatusCode() != 404) {
throw e;
}
}
}
}
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
cosmosContainer.deleteItem(documentId, PartitionKey.NONE, options).block();
}
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
client.getDatabase(databaseId).read().block();
List<CosmosUserProperties> res = database
.queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
.collectList().block();
if (!res.isEmpty()) {
deleteUser(database, userId);
}
}
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
database.getUser(userId).delete().block();
}
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
client.createDatabase(databaseSettings).block();
return client.getDatabase(databaseSettings.getId());
}
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
client.createDatabase(databaseSettings).block();
return client.getDatabase(databaseSettings.getId());
}
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
try {
client.createDatabase(databaseSettings);
return client.getDatabase(databaseSettings.getId());
} catch (CosmosException e) {
e.printStackTrace();
}
return null;
}
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
List<CosmosDatabaseProperties> res = client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
.collectList()
.block();
if (res.size() != 0) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
database.read().block();
return database;
} else {
CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
client.createDatabase(databaseSettings).block();
return client.getDatabase(databaseSettings.getId());
}
}
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
if (database != null) {
try {
database.delete().block();
} catch (Exception e) {
}
}
}
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
if (database != null) {
try {
logger.info("attempting to delete database ....");
database.delete();
logger.info("database deletion completed");
} catch (Exception e) {
logger.error("failed to delete sync database", e);
}
}
}
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
if (database != null) {
List<CosmosContainerProperties> collections = database.readAllContainers()
.collectList()
.block();
for(CosmosContainerProperties collection: collections) {
database.getContainer(collection.getId()).delete().block();
}
}
}
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
if (collection != null) {
try {
logger.info("attempting to delete container {}.{}....",
collection.getDatabase().getId(),
collection.getId());
collection.delete().block();
logger.info("Container {}.{} deletion completed",
collection.getDatabase().getId(),
collection.getId());
} catch (Exception e) {
boolean shouldLogAsError = true;
if (e instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) e;
if (cosmosException.getStatusCode() == 404) {
shouldLogAsError = false;
logger.info(
"Container {}.{} does not exist anymore.",
collection.getDatabase().getId(),
collection.getId());
}
}
if (shouldLogAsError) {
logger.error("failed to delete sync container {}.{}",
collection.getDatabase().getId(),
collection.getId(),
e);
}
}
finally {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
}
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
if (database != null && collectionId != null) {
try {
safeDeleteCollection(database.getContainer(collectionId));
} catch (Exception e) {
}
}
}
static protected void safeCloseAsync(CosmosAsyncClient client) {
if (client != null) {
new Thread(() -> {
try {
client.close();
} catch (Exception e) {
logger.error("failed to close client", e);
}
}).start();
}
}
static protected void safeClose(CosmosAsyncClient client) {
if (client != null) {
try {
client.close();
} catch (Exception e) {
logger.error("failed to close client", e);
}
}
}
static protected void safeCloseSyncClient(CosmosClient client) {
if (client != null) {
try {
logger.info("closing client ...");
client.close();
logger.info("closing client completed");
} catch (Exception e) {
logger.error("failed to close client", e);
}
}
}
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
validateSuccess(single, validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
validateSuccess(single.flux(), validator, timeout);
}
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
CosmosResponseValidator<T> validator, long timeout) {
TestSubscriber<T> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
throws InterruptedException {
validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
FailureValidator validator, long timeout) throws InterruptedException {
TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
Mono<T> responseMono, CosmosItemResponseValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
Mono<T> responseMono, FailureValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
FeedResponseListValidator<T> validator) {
validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
FeedResponseListValidator<T> validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
validator.validate(testSubscriber.values());
}
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
String query,
CosmosAsyncContainer container,
int[] pageSizes,
FeedResponseListValidator<T> validator,
Class<T> classType) {
for (int pageSize : pageSizes) {
List<FeedResponse<T>> receivedDocuments = queryWithContinuationTokens(query, container, pageSize, classType);
validator.validate(receivedDocuments);
}
}
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
String query,
CosmosAsyncContainer container,
int pageSize,
Class<T> classType) {
String requestContinuation = null;
List<String> continuationTokens = new ArrayList<String>();
List<FeedResponse<T>> responseList = new ArrayList<>();
do {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
@SuppressWarnings("unchecked")
FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
requestContinuation = firstPage.getContinuationToken();
responseList.add(firstPage);
continuationTokens.add(requestContinuation);
} while (requestContinuation != null);
return responseList;
}
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
FailureValidator validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
@DataProvider
public static Object[][] clientBuilders() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
@DataProvider
public static Object[][] clientBuildersWithGateway() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
};
}
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
};
}
static ConsistencyLevel parseConsistency(String consistency) {
if (consistency != null) {
consistency = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
return ConsistencyLevel.valueOf(consistency);
}
logger.error("INVALID configured test consistency [{}].", consistency);
throw new IllegalStateException("INVALID configured test consistency " + consistency);
}
static List<String> parsePreferredLocation(String preferredLocations) {
if (StringUtils.isEmpty(preferredLocations)) {
return null;
}
try {
return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
});
} catch (Exception e) {
logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations);
}
}
static List<Protocol> parseProtocols(String protocols) {
if (StringUtils.isEmpty(protocols)) {
return null;
}
List<Protocol> protocolList = new ArrayList<>();
try {
List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
});
for(String protocol : protocolStrings) {
protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
}
return protocolList;
} catch (Exception e) {
logger.error("INVALID configured test protocols [{}].", protocols);
throw new IllegalStateException("INVALID configured test protocols " + protocols);
}
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
return new Object[][]{
{ createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
{ createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
};
}
private static Object[][] simpleClientBuildersWithDirect(
boolean contentResponseOnWriteEnabled,
Protocol... protocols) {
return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
private static Object[][] simpleClientBuildersWithDirect(
boolean includeGateway,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
if (includeGateway) {
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
false,
null,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
}
return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
@DataProvider
public static Object[][] clientBuildersWithDirect() {
return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
Object[][] clientBuildersWithDisabledContentResponseOnWrite =
clientBuildersWithDirectSession(false, true, Protocol.TCP);
Object[][] clientBuildersWithEnabledContentResponseOnWrite =
clientBuildersWithDirectSession(true, true, Protocol.TCP);
int length = clientBuildersWithDisabledContentResponseOnWrite.length
+ clientBuildersWithEnabledContentResponseOnWrite.length;
Object[][] clientBuilders = new Object[length][];
int index = 0;
for (int i = 0; i < clientBuildersWithDisabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithDisabledContentResponseOnWrite[i];
}
for (int i = 0; i < clientBuildersWithEnabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithEnabledContentResponseOnWrite[i];
}
return clientBuilders;
}
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
Object[][] originalProviders = clientBuildersWithDirectSession(
true,
true,
toArray(protocols));
List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
Object[] injectedProviderParameters = new Object[1];
CosmosClientBuilder builder = createGatewayRxDocumentClient(
TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
ConsistencyLevel.SESSION,
false,
null,
true,
true);
injectedProviderParameters[0] = builder;
providers.add(injectedProviderParameters);
Object[][] array = new Object[providers.size()][];
return providers.toArray(array);
}
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
return clientBuildersWithDirectSession(true, true);
}
static Protocol[] toArray(List<Protocol> protocols) {
return protocols.toArray(new Protocol[protocols.size()]);
}
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
return clientBuildersWithDirect(new ArrayList<ConsistencyLevel>() {{
add(ConsistencyLevel.SESSION);
}}, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
if (StringUtils.isEmpty(consistencies)) {
return null;
}
List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
try {
List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
for(String consistency : consistencyStrings) {
consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
}
return consistencyLevels;
} catch (Exception e) {
logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies);
}
}
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
private static Object[][] clientBuildersWithDirect(
List<ConsistencyLevel> testConsistencies,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
ThrottlingRetryOptions options = new ThrottlingRetryOptions();
options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.throttlingRetryOptions(options)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(ConsistencyLevel.SESSION);
}
static protected CosmosClientBuilder createGatewayRxDocumentClient(
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
return createGatewayRxDocumentClient(
TestConfigurations.HOST,
consistencyLevel,
multiMasterEnabled,
preferredRegions,
contentResponseOnWriteEnabled,
retryOnThrottledRequests);
}
static protected CosmosClientBuilder createGatewayRxDocumentClient(
String endpoint,
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.multipleWriteRegionsEnabled(multiMasterEnabled)
.preferredRegions(preferredRegions)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
ImplementationBridgeHelpers
.CosmosClientBuilderHelper
.getCosmosClientBuilderAccessor()
.buildConnectionPolicy(builder);
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
return builder;
}
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
Protocol protocol,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.directMode(DirectConnectionConfig.getDefaultConfig())
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
if (preferredRegions != null) {
builder.preferredRegions(preferredRegions);
}
if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
builder.multipleWriteRegionsEnabled(true);
}
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
Configs configs = spy(new Configs());
doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
return injectConfigs(builder, configs);
}
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
return Math.max((totalExpectedResult + maxPageSize - 1 ) / maxPageSize, 1);
}
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
return new Object[][]{
{true},
{false},
{null}
};
}
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
return new Object[][]{
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
};
}
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
public byte[] decodeHexString(String string) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (int i = 0; i < string.length(); i+=2) {
int b = Integer.parseInt(string.substring(i, i + 2), 16);
outputStream.write(b);
}
return outputStream.toByteArray();
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} |
Fixed | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
} | TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size())); | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
// Verifies that a CLIENT-level (builder default) 1s e2e timeout policy applies to point
// reads: an injected 1.5s response delay must surface as OperationCancelledException.
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
// Rebuild the client with the 1s policy as the client-wide default.
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Verifies that the system property disabling default e2e timeouts for NON-point
// operations does not affect point reads: the read must still time out.
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
// Always restore global state so subsequent tests are unaffected.
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
// Verifies that item creation with a REQUEST-level 1s e2e timeout fails with
// OperationCancelledException when the injected 1.5s response delay exceeds it.
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
// Verifies that item replacement with a REQUEST-level 1s e2e timeout fails with
// OperationCancelledException; the initial create (no fault injected yet) succeeds.
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
// Create succeeds: the REPLACE_ITEM fault rule is installed only afterwards.
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Verifies that upsert with a REQUEST-level 1s e2e timeout fails with
// OperationCancelledException when the injected 1.5s response delay exceeds it.
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Asserts that subscribing to the given Mono fails with OperationCancelledException —
// the error type surfaced when an end-to-end operation timeout fires.
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
/**
 * Query with a REQUEST-level 1s end-to-end timeout policy must fail with
 * {@code OperationCancelledException} carrying the CLIENT_OPERATION_TIMEOUT sub-status
 * when the injected response delay (1.5s) exceeds the timeout.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    // FIX: reuse the shared 1s policy field instead of shadowing it with an identical
    // local copy; also dropped the unused 'itemToQuery' local (the query does not use it).
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
            == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * When the system property disabling default e2e timeouts for non-point operations is
 * set, a query must complete despite the injected 1.5s delay (the 1s client-level
 * policy is suppressed for queries).
 *
 * FIX: the {@code @Test} annotation was written twice in a row, which does not compile
 * ({@code @Test} is not a repeatable annotation) — keep a single annotation. Also
 * dropped the unused 'itemToQuery' local.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    System.setProperty(
        Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
        "true");
    logger.info(
        "isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
        Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
    initializeClient(
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build()
    );
    FaultInjectionRule faultInjectionRule = null;
    try {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1L)
            .verifyComplete();
    } finally {
        if (faultInjectionRule != null) {
            faultInjectionRule.disable();
        }
        // Restore global state for subsequent tests.
        System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
    }
}
/**
 * Verifies on a dedicated database/container that a CLIENT-level e2e timeout policy
 * (set on the builder) applies to both point reads and queries, and that a
 * request-level policy with {@code enable(false)} suppresses it again.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosClientBuilder builder = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
        .credential(credential);
    try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
        String dbname = "db_" + UUID.randomUUID();
        String containerName = "container_" + UUID.randomUUID();
        CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
        cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
        cosmosAsyncClient.getDatabase(dbname)
            .createContainerIfNotExists(properties).block();
        CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
            .getContainer(containerName);
        TestObject obj = new TestObject(UUID.randomUUID().toString(),
            "name123",
            2,
            UUID.randomUUID().toString());
        // FIX: drop the unused 'response' local; only the create side effect matters.
        container.createItem(obj).block();
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
        // Sanity check: the read succeeds before any fault is injected.
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        // FIX: capture the READ_ITEM rule so it is disabled like the QUERY_ITEM one
        // (previously the return value was discarded and the rule never disabled).
        FaultInjectionRule readFaultInjectionRule = injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
        // Re-subscribing to the same cold Mono issues a fresh read, now delayed past 1s.
        verifyExpectError(cosmosItemResponseMono);
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
            .verify();
        // A request-level policy with enable(false) must suppress the client-level timeout.
        CosmosItemRequestOptions options = new CosmosItemRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        readFaultInjectionRule.disable();
        faultInjectionRule.disable();
        cosmosAsyncClient.getDatabase(dbname).delete().block();
    }
}
// Installs a fault-injection rule on the container that delays the server response by
// 1.5s exactly once for the given operation type over DIRECT connections. Returns the
// rule so the caller can disable it when done.
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
// Tri-state flag: null means "use the builder's default", otherwise set explicitly.
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
// Blocks until the rule is active so the test cannot race ahead of the injector.
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
// Builds a test document whose name/prop carry the same random value in
// [0, DEFAULT_NUM_DOCUMENTS / 2).
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
    final int randomValue = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
    return new TestObject(documentId, "name" + randomValue, randomValue, partitionKey);
}
// Bulk-inserts 'documentCount' randomly generated documents into the container.
// When 'partitionKeys' is null each document gets a unique random partition key;
// otherwise keys are drawn randomly from the provided list. Blocks until replicas
// have caught up, then returns the inserted documents.
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
// Simple serializable test document. 'mypk' is the value for the container's
// partition-key path ("/mypk"); 'constantProp' is always serialized with the same
// fixed value. The no-arg constructor exists for JSON deserialization.
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
/**
 * Query with a REQUEST-level 1s end-to-end timeout policy must fail with
 * {@code OperationCancelledException} carrying the CLIENT_OPERATION_TIMEOUT sub-status
 * when the injected response delay (1.5s) exceeds the timeout.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    // FIX: reuse the shared 1s policy field instead of shadowing it with an identical
    // local copy; also dropped the no-op 'createdDocuments.get(...)' statement whose
    // result was discarded.
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
            == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * When the system property disabling default e2e timeouts for non-point operations is
 * set, a query must complete despite the injected 1.5s delay.
 *
 * FIX: the {@code @Test} annotation was duplicated on this method (twice in a row),
 * which does not compile ({@code @Test} is not a repeatable annotation) — keep a
 * single annotation. Also dropped the no-op 'createdDocuments.get(...)' statement.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    System.setProperty(
        Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
        "true");
    logger.info(
        "isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
        Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
    initializeClient(
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build()
    );
    FaultInjectionRule faultInjectionRule = null;
    try {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1L)
            .verifyComplete();
    } finally {
        if (faultInjectionRule != null) {
            faultInjectionRule.disable();
        }
        // Restore global state for subsequent tests.
        System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
    }
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
}
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} |
Fixed | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
} | TestObject itemToQuery = createdDocuments.get(random.nextInt(createdDocuments.size())); | public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
logger.info(
"isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
initializeClient(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build()
);
FaultInjectionRule faultInjectionRule = null;
try {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1L)
.verifyComplete();
} finally {
if (faultInjectionRule != null) {
faultInjectionRule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Re-create the client with the client-level 1s e2e policy (built in the constructor)
    // so the read below inherits the timeout without per-request options.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
    FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
    try {
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
        verifyExpectError(cosmosItemResponseMono);
    } finally {
        // Disable even when the assertion fails so the injected fault cannot leak into
        // subsequent tests (consistent with the try/finally used by sibling tests).
        rule.disable();
    }
}
@Test(groups = {"fast"}, timeOut = 10000L)
// Verifies that a client-level end-to-end timeout still applies to point reads even when
// the e2e policy is disabled for non-point operations via the system property below.
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
// Opt out of e2e timeouts for non-point operations; a point read must still honor the
// client-level policy configured by initializeClient below.
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
// Delay READ_ITEM responses (1.5s, see injectFailure) beyond the 1s e2e timeout.
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
// Restore global state so other tests are unaffected.
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    // Per-request 1s e2e timeout; the injected delay (1.5s) exceeds it, so the create
    // must fail with OperationCancelledException (checked by verifyExpectError).
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
    try {
        TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
        verifyExpectError(cosmosItemResponseMono);
    } finally {
        // Always remove the injected fault so a failed assertion cannot leak it into other tests.
        faultInjectionRule.disable();
    }
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    // Create the target item before any fault is active so only the replace is delayed.
    TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
    createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
    FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
    try {
        inputObject.setName("replaceName");
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
        verifyExpectError(cosmosItemResponseMono);
    } finally {
        // Always remove the injected fault so a failed assertion cannot leak it into other tests.
        rule.disable();
    }
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
    // Per-request 1s e2e timeout; the injected 1.5s delay forces cancellation.
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
    try {
        TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
        verifyExpectError(cosmosItemResponseMono);
    } finally {
        // Always remove the injected fault so a failed assertion cannot leak it into other tests.
        rule.disable();
    }
}
// Asserts that the given item-response publisher terminates with an
// OperationCancelledException (the error raised when the e2e timeout fires).
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
    StepVerifier
        .create(cosmosItemResponseMono)
        .expectErrorMatches(OperationCancelledException.class::isInstance)
        .verify();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    // Reuse the shared 1s e2e policy built in the constructor; the previous local copy
    // shadowed the field with an identical configuration. (The unused random document
    // lookup was also removed.)
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    try {
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        // Expect cancellation with the client-operation-timeout sub-status.
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
                && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
            .verify();
    } finally {
        // Always remove the injected fault so a failed assertion cannot leak it into other tests.
        faultInjectionRule.disable();
    }
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Configure the 1s e2e timeout at the client level (field built in the constructor;
    // the previous local copy shadowed it with an identical configuration). The query
    // below uses plain options and must inherit the client-level policy.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    try {
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        // Expect cancellation with the client-operation-timeout sub-status.
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
                && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
            .verify();
    } finally {
        // Always remove the injected fault so a failed assertion cannot leak it into other tests.
        faultInjectionRule.disable();
    }
}
// Fix: removed the duplicated @Test annotation - @Test is not a repeatable annotation,
// so two copies on the same method do not compile. Also removed the unused local that
// captured the createItem response.
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosClientBuilder builder = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
        .credential(credential);
    try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
        String dbname = "db_" + UUID.randomUUID();
        String containerName = "container_" + UUID.randomUUID();
        CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
        cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
        cosmosAsyncClient.getDatabase(dbname)
            .createContainerIfNotExists(properties).block();
        CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
            .getContainer(containerName);
        TestObject obj = new TestObject(UUID.randomUUID().toString(),
            "name123",
            2,
            UUID.randomUUID().toString());
        // Only the server-side item is needed; the response itself is unused.
        container.createItem(obj).block();
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
        // Sanity check: without fault injection the read succeeds.
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        // The READ_ITEM rule fires at most once (times(1) in injectFailure).
        injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
        verifyExpectError(cosmosItemResponseMono);
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
        // Sanity check: without fault injection the query succeeds.
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
            .verify();
        // Per-request options that explicitly disable the e2e policy must override the
        // client-level config, so the following read and query complete normally.
        CosmosItemRequestOptions options = new CosmosItemRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        faultInjectionRule.disable();
        cosmosAsyncClient.getDatabase(dbname).delete().block();
    }
}
// Registers a fault-injection rule on the given container that delays matching
// DIRECT-mode responses by 1.5s - longer than the 1s end-to-end timeout these tests
// configure - and fires at most once (times(1)). Returns the rule so callers can
// disable() it when done.
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
// Boxed Boolean acts as a tri-state: null means "keep the builder's default".
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
// Registration is asynchronous; block so the fault is active before returning.
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
// Builds a test document whose name/prop carry the same random suffix drawn from
// [0, DEFAULT_NUM_DOCUMENTS / 2).
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
    final int suffix = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
    return new TestObject(documentId, "name" + suffix, suffix, partitionKey);
}
// Bulk-inserts documentCount generated documents into the container and waits for
// replicas to catch up. When partitionKeys is null each document gets a random
// partition key; otherwise one is picked at random from the provided list.
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
    List<TestObject> pendingDocs = new ArrayList<>(documentCount);
    for (int created = 0; created < documentCount; created++) {
        String id = UUID.randomUUID().toString();
        String pk = (partitionKeys == null)
            ? UUID.randomUUID().toString()
            : partitionKeys.get(random.nextInt(partitionKeys.size()));
        pendingDocs.add(getDocumentDefinition(id, pk));
    }
    List<TestObject> insertedDocs = bulkInsertBlocking(container, pendingDocs);
    waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
    return insertedDocs;
}
// Minimal document payload for these tests; the SDK maps it to/from JSON, so the no-arg
// constructor and bean-style accessors are required.
static class TestObject {
String id; // document id
String name;
int prop;
String mypk; // partition key value (containers here are partitioned on /mypk)
String constantProp = "constantProp"; // fixed marker present on every document
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
private static final int DEFAULT_NUM_DOCUMENTS = 100;
private static final int DEFAULT_PAGE_SIZE = 100;
private CosmosAsyncContainer createdContainer;
private final Random random;
private final List<TestObject> createdDocuments = new ArrayList<>();
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
initializeClient(null);
}
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
CosmosAsyncClient client = this
.getClientBuilder()
.endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
.buildAsyncClient();
createdContainer = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdContainer);
createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
StepVerifier.create(cosmosItemResponseMono)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig =
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
createdDocuments.get(random.nextInt(createdDocuments.size()));
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
&& ((OperationCancelledException) throwable).getSubStatusCode()
== HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
.verify();
faultInjectionRule.disable();
}
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Configure the 1s e2e timeout at the client level (field built in the constructor;
    // the previous local copy shadowed it with an identical configuration). The
    // side-effect-free createdDocuments.get(...) statement was removed.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    try {
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        // Expect cancellation with the client-operation-timeout sub-status.
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
                && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
            .verify();
    } finally {
        // Always remove the injected fault so a failed assertion cannot leak it into other tests.
        faultInjectionRule.disable();
    }
}
// Fix: removed the duplicated @Test annotation - @Test is not a repeatable annotation,
// so two copies on the same method do not compile.
@Test(groups = {"fast"}, timeOut = 10000L)
public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    CosmosClientBuilder builder = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
        .credential(credential);
    try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
        String dbname = "db_" + UUID.randomUUID();
        String containerName = "container_" + UUID.randomUUID();
        CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
        cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
        cosmosAsyncClient.getDatabase(dbname)
            .createContainerIfNotExists(properties).block();
        CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
            .getContainer(containerName);
        TestObject obj = new TestObject(UUID.randomUUID().toString(),
            "name123",
            2,
            UUID.randomUUID().toString());
        container.createItem(obj).block();
        Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
        // Sanity check: without fault injection the read succeeds.
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        // The READ_ITEM rule fires at most once (times(1) in injectFailure).
        injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
        verifyExpectError(cosmosItemResponseMono);
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
        // Sanity check: without fault injection the query succeeds.
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
        StepVerifier.create(queryPagedFlux)
            .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
            .verify();
        // Per-request options that explicitly disable the e2e policy must override the
        // client-level config, so the following read and query complete normally.
        CosmosItemRequestOptions options = new CosmosItemRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        cosmosItemResponseMono =
            container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
        StepVerifier.create(cosmosItemResponseMono)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
                    .enable(false)
                    .build());
        queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1)
            .expectComplete()
            .verify();
        faultInjectionRule.disable();
        cosmosAsyncClient.getDatabase(dbname).delete().block();
    }
}
private FaultInjectionRule injectFailure(
CosmosAsyncContainer container,
FaultInjectionOperationType operationType,
Boolean suppressServiceRequests) {
FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofMillis(1500))
.times(1);
if (suppressServiceRequests != null) {
faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
}
IFaultInjectionResult result = faultInjectionResultBuilder.build();
FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
.operationType(operationType)
.connectionType(FaultInjectionConnectionType.DIRECT)
.build();
FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
.condition(condition)
.result(result)
.build();
FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
.getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
return rule;
}
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
static class TestObject {
String id;
String name;
int prop;
String mypk;
String constantProp = "constantProp";
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} |
Fixed | public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
CosmosItemResponse<?> response = container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
} | CosmosItemResponse<?> response = container.createItem(obj).block(); | public void clientLevelEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosClientBuilder builder = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.endToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig)
.credential(credential);
try (CosmosAsyncClient cosmosAsyncClient = builder.buildAsyncClient()) {
String dbname = "db_" + UUID.randomUUID();
String containerName = "container_" + UUID.randomUUID();
CosmosContainerProperties properties = new CosmosContainerProperties(containerName, "/mypk");
cosmosAsyncClient.createDatabaseIfNotExists(dbname).block();
cosmosAsyncClient.getDatabase(dbname)
.createContainerIfNotExists(properties).block();
CosmosAsyncContainer container = cosmosAsyncClient.getDatabase(dbname)
.getContainer(containerName);
TestObject obj = new TestObject(UUID.randomUUID().toString(),
"name123",
2,
UUID.randomUUID().toString());
container.createItem(obj).block();
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
injectFailure(container, FaultInjectionOperationType.READ_ITEM, null);
verifyExpectError(cosmosItemResponseMono);
String queryText = "select top 1 * from c";
SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
CosmosPagedFlux<TestObject> queryPagedFlux = container.queryItems(sqlQuerySpec, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
FaultInjectionRule faultInjectionRule = injectFailure(container, FaultInjectionOperationType.QUERY_ITEM, null);
StepVerifier.create(queryPagedFlux)
.expectErrorMatches(throwable -> throwable instanceof OperationCancelledException)
.verify();
CosmosItemRequestOptions options = new CosmosItemRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
cosmosItemResponseMono =
container.readItem(obj.id, new PartitionKey(obj.mypk), options, TestObject.class);
StepVerifier.create(cosmosItemResponseMono)
.expectNextCount(1)
.expectComplete()
.verify();
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions()
.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.enable(false)
.build());
queryPagedFlux = container.queryItems(sqlQuerySpec, queryRequestOptions, TestObject.class);
StepVerifier.create(queryPagedFlux)
.expectNextCount(1)
.expectComplete()
.verify();
faultInjectionRule.disable();
cosmosAsyncClient.getDatabase(dbname).delete().block();
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
// Number of seed documents inserted into the shared container before tests run.
private static final int DEFAULT_NUM_DOCUMENTS = 100;
// Page size constant for paged queries (not referenced in the visible methods).
private static final int DEFAULT_PAGE_SIZE = 100;
// Shared multi-partition container targeted by every test; (re)assigned by initializeClient().
private CosmosAsyncContainer createdContainer;
private final Random random;
// Documents seeded in beforeClass(); tests pick read/query targets from this list.
private final List<TestObject> createdDocuments = new ArrayList<>();
// Shared 1-second end-to-end timeout policy reused by the point-operation tests.
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
// TestNG factory: instantiate this suite once per direct-TCP/session client builder variant.
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
// Shared policy: cancel an operation client-side once it exceeds 1 second end-to-end.
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
// Start with no client-level e2e timeout; individual tests opt in per request or re-init.
initializeClient(null);
}
/**
 * (Re)builds the async client with the given client-level end-to-end timeout config
 * ({@code null} means none), points {@code createdContainer} at the shared multi-partition
 * container, truncates it, and re-seeds it with {@code DEFAULT_NUM_DOCUMENTS} documents.
 */
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
    CosmosAsyncClient client = this
        .getClientBuilder()
        .endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
        .buildAsyncClient();
    createdContainer = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdContainer);
    // Fix: drop references to documents just deleted by truncateCollection. Without this,
    // tests that re-initialize the client could randomly pick a stale (deleted) document
    // from createdDocuments and fail for the wrong reason.
    createdDocuments.clear();
    createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
// Injected READ_ITEM delay + client-level 1s e2e policy => the read must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
// Rebuild the client with the 1-second end-to-end policy applied at client level.
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Point reads must still honor the e2e timeout even when the "disable default e2e for
// non-point operations" system property is set (it only affects queries etc.).
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
// Always restore global state so later tests see the default behavior.
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
// Injected CREATE_ITEM delay + request-level 1s e2e policy => the create must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
// Injected REPLACE_ITEM delay + request-level 1s e2e policy => the replace must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
// The create succeeds (no REPLACE_ITEM fault is active yet); only the replace is delayed.
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Injected UPSERT_ITEM delay + request-level 1s e2e policy => the upsert must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
/**
 * Subscribes to the given item-response publisher and asserts it terminates with an
 * {@code OperationCancelledException}, i.e. the end-to-end timeout policy fired.
 */
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
    StepVerifier.create(cosmosItemResponseMono)
        .verifyErrorMatches(error -> error instanceof OperationCancelledException);
}
/**
 * A query carrying a request-level 1-second end-to-end timeout policy must fail with
 * {@link OperationCancelledException} (sub-status CLIENT_OPERATION_TIMEOUT) when a
 * 1.5-second response delay is injected for QUERY_ITEM.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Cleanup: reuse the shared 1-second policy field instead of a shadowing local copy,
    // and drop the unused local `itemToQuery` (the query does not target a specific doc).
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * Same as the request-level variant, but the 1-second end-to-end timeout is applied at
 * client level via {@link #initializeClient}; the injected QUERY_ITEM delay must cancel
 * the query with sub-status CLIENT_OPERATION_TIMEOUT.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Cleanup: the instance field already holds an identical 1-second policy; the previous
    // shadowing local and the unused `itemToQuery` local have been removed.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * When the "disable default e2e timeout for non-point operations" property is set, an
 * injected QUERY_ITEM delay must NOT cancel the query even though the client carries a
 * 1-second end-to-end policy; the query should still return its first page.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    System.setProperty(
        Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
        "true");
    logger.info(
        "isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
        Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
    initializeClient(
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build()
    );
    FaultInjectionRule faultInjectionRule = null;
    try {
        // Cleanup: removed the unused local `itemToQuery` — the query targets no specific doc.
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1L)
            .verifyComplete();
    } finally {
        if (faultInjectionRule != null) {
            faultInjectionRule.disable();
        }
        // Restore global state so subsequent tests see the default behavior.
        System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
    }
}
/**
 * Registers a fault-injection rule on {@code container} that delays the first response of
 * the given {@code operationType} by 1.5 seconds — longer than the 1-second end-to-end
 * timeout these tests configure.
 *
 * NOTE(review): a stray {@code @Test} annotation was removed here — this is a private,
 * parameterized helper, which TestNG cannot run as a test.
 *
 * @param suppressServiceRequests when non-null, forwarded to the fault-injection result builder
 * @return the enabled rule; callers must {@code disable()} it when done
 */
private FaultInjectionRule injectFailure(
    CosmosAsyncContainer container,
    FaultInjectionOperationType operationType,
    Boolean suppressServiceRequests) {
    FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
        .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
        .delay(Duration.ofMillis(1500))
        .times(1);
    if (suppressServiceRequests != null) {
        faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
    }
    IFaultInjectionResult result = faultInjectionResultBuilder.build();
    FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
        .operationType(operationType)
        .connectionType(FaultInjectionConnectionType.DIRECT)
        .build();
    FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
        .condition(condition)
        .result(result)
        .build();
    FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
        .getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
    injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
    return rule;
}
// Builds a test document whose name/prop fields derive from one random draw.
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
return doc;
}
// Bulk-inserts `documentCount` random documents; when `partitionKeys` is null each doc
// gets its own random partition key, otherwise keys are drawn from the given list.
// Blocks until inserted and (for weak consistency levels) replicas have caught up.
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
List<TestObject> documentsToInsert = new ArrayList<>();
for (int i = 0; i < documentCount; i++) {
documentsToInsert.add(
getDocumentDefinition(
UUID.randomUUID().toString(),
partitionKeys == null ? UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
}
List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
return documentInserted;
}
// Plain serializable document payload used by the tests in this suite.
static class TestObject {
String id;
String name;
int prop;
// Partition-key property of the test container.
String mypk;
// Fixed property so every serialized document carries a known constant value.
String constantProp = "constantProp";
// No-args constructor — presumably required by the JSON (de)serializer; TODO confirm.
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} | class EndToEndTimeOutValidationTests extends TestSuiteBase {
// Number of seed documents inserted into the shared container before tests run.
private static final int DEFAULT_NUM_DOCUMENTS = 100;
// Page size constant for paged queries (not referenced in the visible methods).
private static final int DEFAULT_PAGE_SIZE = 100;
// Shared multi-partition container targeted by every test; (re)assigned by initializeClient().
private CosmosAsyncContainer createdContainer;
private final Random random;
// Documents seeded in beforeClass(); tests pick read/query targets from this list.
private final List<TestObject> createdDocuments = new ArrayList<>();
// Shared 1-second end-to-end timeout policy reused by the point-operation tests.
private final CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig;
// TestNG factory: instantiate this suite once per direct-TCP/session client builder variant.
@Factory(dataProvider = "clientBuildersWithDirectTcpSession")
public EndToEndTimeOutValidationTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
random = new Random();
// Shared policy: cancel an operation client-side once it exceeds 1 second end-to-end.
endToEndOperationLatencyPolicyConfig = new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
.build();
}
@BeforeClass(groups = {"fast"}, timeOut = SETUP_TIMEOUT * 100)
public void beforeClass() throws Exception {
// Start with no client-level e2e timeout; individual tests opt in per request or re-init.
initializeClient(null);
}
/**
 * (Re)builds the async client with the given client-level end-to-end timeout config
 * ({@code null} means none), points {@code createdContainer} at the shared multi-partition
 * container, truncates it, and re-seeds it with {@code DEFAULT_NUM_DOCUMENTS} documents.
 */
public void initializeClient(CosmosEndToEndOperationLatencyPolicyConfig e2eDefaultConfig) {
    CosmosAsyncClient client = this
        .getClientBuilder()
        .endToEndOperationLatencyPolicyConfig(e2eDefaultConfig)
        .buildAsyncClient();
    createdContainer = getSharedMultiPartitionCosmosContainer(client);
    truncateCollection(createdContainer);
    // Fix: drop references to documents just deleted by truncateCollection. Without this,
    // tests that re-initialize the client could randomly pick a stale (deleted) document
    // from createdDocuments and fail for the wrong reason.
    createdDocuments.clear();
    createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
}
// Injected READ_ITEM delay + client-level 1s e2e policy => the read must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
// Rebuild the client with the 1-second end-to-end policy applied at client level.
initializeClient(endToEndOperationLatencyPolicyConfig);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Point reads must still honor the e2e timeout even when the "disable default e2e for
// non-point operations" system property is set (it only affects queries etc.).
@Test(groups = {"fast"}, timeOut = 10000L)
public void readItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutEvenWhenDisabledForNonPointOperations() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
System.setProperty(
Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
"true");
initializeClient(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = null;
try {
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
TestObject itemToRead = createdDocuments.get(random.nextInt(createdDocuments.size()));
rule = injectFailure(createdContainer, FaultInjectionOperationType.READ_ITEM, null);
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.readItem(itemToRead.id, new PartitionKey(itemToRead.mypk), options, TestObject.class);
verifyExpectError(cosmosItemResponseMono);
} finally {
if (rule != null) {
rule.disable();
}
// Always restore global state so later tests see the default behavior.
System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
}
}
// Injected CREATE_ITEM delay + request-level 1s e2e policy => the create must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void createItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.CREATE_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
faultInjectionRule.disable();
}
// Injected REPLACE_ITEM delay + request-level 1s e2e policy => the replace must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void replaceItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
// The create succeeds (no REPLACE_ITEM fault is active yet); only the replace is delayed.
createdContainer.createItem(inputObject, new PartitionKey(inputObject.mypk), options).block();
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.REPLACE_ITEM, null);
inputObject.setName("replaceName");
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.replaceItem(inputObject, inputObject.id, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
// Injected UPSERT_ITEM delay + request-level 1s e2e policy => the upsert must be cancelled.
@Test(groups = {"fast"}, timeOut = 10000L)
public void upsertItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("Failure injection only supported for DIRECT mode");
}
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
FaultInjectionRule rule = injectFailure(createdContainer, FaultInjectionOperationType.UPSERT_ITEM, null);
TestObject inputObject = new TestObject(UUID.randomUUID().toString(), "name123", 1, UUID.randomUUID().toString());
Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono =
createdContainer.upsertItem(inputObject, new PartitionKey(inputObject.mypk), options);
verifyExpectError(cosmosItemResponseMono);
rule.disable();
}
/**
 * Subscribes to the given item-response publisher and asserts it terminates with an
 * {@code OperationCancelledException}, i.e. the end-to-end timeout policy fired.
 */
static void verifyExpectError(Mono<CosmosItemResponse<TestObject>> cosmosItemResponseMono) {
    StepVerifier.create(cosmosItemResponseMono)
        .verifyErrorMatches(error -> error instanceof OperationCancelledException);
}
/**
 * A query carrying a request-level 1-second end-to-end timeout policy must fail with
 * {@link OperationCancelledException} (sub-status CLIENT_OPERATION_TIMEOUT) when a
 * 1.5-second response delay is injected for QUERY_ITEM.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeout() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Cleanup: reuse the shared 1-second policy field instead of a shadowing local copy,
    // and drop the no-op `createdDocuments.get(...)` statement (its result was discarded).
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    options.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndOperationLatencyPolicyConfig);
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * Same as the request-level variant, but the 1-second end-to-end timeout is applied at
 * client level via {@link #initializeClient}; the injected QUERY_ITEM delay must cancel
 * the query with sub-status CLIENT_OPERATION_TIMEOUT.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldTimeoutWithClientConfig() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    // Cleanup: the instance field already holds an identical 1-second policy; the previous
    // shadowing local and the no-op `createdDocuments.get(...)` statement were removed.
    initializeClient(endToEndOperationLatencyPolicyConfig);
    CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
    String queryText = "select top 1 * from c";
    SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
    FaultInjectionRule faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
    CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
    StepVerifier.create(queryPagedFlux)
        .expectErrorMatches(throwable -> throwable instanceof OperationCancelledException
            && ((OperationCancelledException) throwable).getSubStatusCode()
                == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT)
        .verify();
    faultInjectionRule.disable();
}
/**
 * When the "disable default e2e timeout for non-point operations" property is set, an
 * injected QUERY_ITEM delay must NOT cancel the query even though the client carries a
 * 1-second end-to-end policy; the query should still return its first page.
 */
@Test(groups = {"fast"}, timeOut = 10000L)
public void queryItemWithEndToEndTimeoutPolicyInOptionsShouldNotTimeoutWhenSuppressed() {
    if (getClientBuilder().buildConnectionPolicy().getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("Failure injection only supported for DIRECT mode");
    }
    System.setProperty(
        Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED,
        "true");
    logger.info(
        "isDefaultE2ETimeoutDisabledForNonPointOperations() after setting system property {}",
        Configs.isDefaultE2ETimeoutDisabledForNonPointOperations());
    initializeClient(
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(1))
            .build()
    );
    FaultInjectionRule faultInjectionRule = null;
    try {
        // Cleanup: removed the no-op `createdDocuments.get(...)` statement (result discarded).
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        String queryText = "select top 1 * from c";
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(queryText);
        faultInjectionRule = injectFailure(createdContainer, FaultInjectionOperationType.QUERY_ITEM, null);
        CosmosPagedFlux<TestObject> queryPagedFlux = createdContainer.queryItems(sqlQuerySpec, options, TestObject.class);
        StepVerifier.create(queryPagedFlux)
            .expectNextCount(1L)
            .verifyComplete();
    } finally {
        if (faultInjectionRule != null) {
            faultInjectionRule.disable();
        }
        // Restore global state so subsequent tests see the default behavior.
        System.clearProperty(Configs.DEFAULT_E2E_FOR_NON_POINT_DISABLED);
    }
}
/**
 * Registers a fault-injection rule on {@code container} that delays the first response of
 * the given {@code operationType} by 1.5 seconds — longer than the 1-second end-to-end
 * timeout these tests configure.
 *
 * NOTE(review): a stray {@code @Test} annotation was removed here — this is a private,
 * parameterized helper, which TestNG cannot run as a test.
 *
 * @param suppressServiceRequests when non-null, forwarded to the fault-injection result builder
 * @return the enabled rule; callers must {@code disable()} it when done
 */
private FaultInjectionRule injectFailure(
    CosmosAsyncContainer container,
    FaultInjectionOperationType operationType,
    Boolean suppressServiceRequests) {
    FaultInjectionServerErrorResultBuilder faultInjectionResultBuilder = FaultInjectionResultBuilders
        .getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
        .delay(Duration.ofMillis(1500))
        .times(1);
    if (suppressServiceRequests != null) {
        faultInjectionResultBuilder.suppressServiceRequests(suppressServiceRequests);
    }
    IFaultInjectionResult result = faultInjectionResultBuilder.build();
    FaultInjectionCondition condition = new FaultInjectionConditionBuilder()
        .operationType(operationType)
        .connectionType(FaultInjectionConnectionType.DIRECT)
        .build();
    FaultInjectionRule rule = new FaultInjectionRuleBuilder("InjectedResponseDelay")
        .condition(condition)
        .result(result)
        .build();
    FaultInjectorProvider injectorProvider = (FaultInjectorProvider) container
        .getOrConfigureFaultInjectorProvider(() -> new FaultInjectorProvider(container));
    injectorProvider.configureFaultInjectionRules(Arrays.asList(rule)).block();
    return rule;
}
// Builds a test document whose name/prop fields derive from a single random draw.
private TestObject getDocumentDefinition(String documentId, String partitionKey) {
    final int randomValue = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
    return new TestObject(documentId, "name" + randomValue, randomValue, partitionKey);
}
/**
 * Bulk-inserts {@code documentCount} random documents into {@code container}. When
 * {@code partitionKeys} is null each document gets its own random partition key; otherwise
 * keys are drawn at random from the given list. Blocks until the inserts complete and
 * (for weak consistency levels) replicas have caught up.
 */
private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
    List<TestObject> docsToCreate = new ArrayList<>(documentCount);
    for (int created = 0; created < documentCount; created++) {
        String pk = (partitionKeys == null)
            ? UUID.randomUUID().toString()
            : partitionKeys.get(random.nextInt(partitionKeys.size()));
        docsToCreate.add(getDocumentDefinition(UUID.randomUUID().toString(), pk));
    }
    List<TestObject> inserted = bulkInsertBlocking(container, docsToCreate);
    waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
    return inserted;
}
// Plain serializable document payload used by the tests in this suite.
static class TestObject {
String id;
String name;
int prop;
// Partition-key property of the test container.
String mypk;
// Fixed property so every serialized document carries a known constant value.
String constantProp = "constantProp";
// No-args constructor — presumably required by the JSON (de)serializer; TODO confirm.
public TestObject() {
}
public TestObject(String id, String name, int prop, String mypk) {
this.id = id;
this.name = name;
this.prop = prop;
this.mypk = mypk;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getProp() {
return prop;
}
public void setProp(final int prop) {
this.prop = prop;
}
public String getMypk() {
return mypk;
}
public void setMypk(String mypk) {
this.mypk = mypk;
}
public String getConstantProp() {
return constantProp;
}
}
} |
Because to test the new override the e2e policy has to be applied to client level - and then it can have side effects because the client instance is often used for an entire test class. | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
// Deletes every document, trigger, stored procedure and UDF in the container so each
// test run starts from an empty, reusable shared collection. All stages block.
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
// Very generous (1h) e2e timeout so cleanup is never cancelled by a timeout policy
// configured elsewhere on the client.
options.setCosmosEndToEndOperationLatencyPolicyConfig(
new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
.build()
);
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(doc -> {
// Re-derive each document's partition key from the container's PK definition
// so the delete targets the correct logical partition.
PartitionKey partitionKey = null;
Object propertyValue = null;
if (paths != null && !paths.isEmpty()) {
List<String> pkPath = PathParser.getPathParts(paths.get(0));
propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
if (propertyValue == null) {
// Document has no value at the PK path => it lives in the "none" partition.
partitionKey = PartitionKey.NONE;
} else {
partitionKey = new PartitionKey(propertyValue);
}
} else {
partitionKey = new PartitionKey(null);
}
return cosmosContainer.deleteItem(doc.getId(), partitionKey);
}).then().block();
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(trigger -> {
return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
}).then().block();
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(storedProcedure -> {
return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
}).then().block();
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
.byPage(maxItemCount)
.publishOn(Schedulers.parallel())
.flatMap(page -> Flux.fromIterable(page.getResults()))
.flatMap(udf -> {
return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
}).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
/**
 * For weak consistency levels (EVENTUAL / CONSISTENT_PREFIX) waits a fixed period so
 * secondary replicas can catch up before reads are validated; no-op for stronger levels.
 */
@SuppressWarnings({"fallthrough"})
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
    switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
        case EVENTUAL:
        case CONSISTENT_PREFIX:
            logger.info(" additional wait in EVENTUAL mode so the replica catch up");
            try {
                TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
            } catch (InterruptedException e) {
                // Fix: narrowed from catch(Exception) — sleep only throws InterruptedException —
                // and restore the interrupt flag instead of silently swallowing it.
                Thread.currentThread().interrupt();
                logger.error("unexpected failure", e);
            }
            // intentional fall-through (see @SuppressWarnings above)
        case SESSION:
        case BOUNDED_STALENESS:
        case STRONG:
        default:
            break;
    }
}
/**
 * Creates a container with manual throughput and returns its handle. For high-throughput
 * (>6000 RU) or multi-region accounts it waits 3 seconds before returning — presumably to
 * let extra partitions/regions finish provisioning; TODO confirm the rationale.
 */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
    CosmosContainerRequestOptions options, int throughput) {
    database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
    CosmosAsyncClient client = ImplementationBridgeHelpers
        .CosmosAsyncDatabaseHelper
        .getCosmosAsyncDatabaseAccessor()
        .getCosmosAsyncClient(database);
    boolean isMultiRegional = ImplementationBridgeHelpers
        .CosmosAsyncClientHelper
        .getCosmosAsyncClientAccessor()
        .getPreferredRegions(client).size() > 1;
    if (throughput > 6000 || isMultiRegional) {
        try {
            Thread.sleep(3000);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status before propagating so callers and the
            // test framework can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    return database.getContainer(cosmosContainerProperties.getId());
}
// Overload without explicit throughput: creates the container with the request options'
// defaults and returns its handle (no provisioning wait, unlike the throughput overload).
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
CosmosContainerRequestOptions options) {
database.createContainer(cosmosContainerProperties, options).block();
return database.getContainer(cosmosContainerProperties.getId());
}
/**
 * Builds a multi-partition container definition (partition key "/pk") whose indexing
 * policy contains four composite indexes: a simple two-column index, a max-column
 * index, an index mixing primitive value types, and an index over strings of varying
 * lengths.
 * NOTE(review): despite the method name, no spatial indexes are configured here —
 * confirm whether spatial indexes were intended.
 */
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";
    // Single-path partition key on "/pk".
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<String>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties =
        new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    // 1) Simple two-column composite index (ASC number, DESC string).
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/" + NUMBER_FIELD, CompositePathSortOrder.ASCENDING),
        newCompositePath("/" + STRING_FIELD, CompositePathSortOrder.DESCENDING))));
    // 2) Maximum-column composite index over four paths.
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/" + NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        newCompositePath("/" + STRING_FIELD, CompositePathSortOrder.ASCENDING),
        newCompositePath("/" + NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING),
        newCompositePath("/" + STRING_FIELD_2, CompositePathSortOrder.ASCENDING))));
    // 3) Composite index mixing primitive value types (number, string, bool, null).
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/" + NUMBER_FIELD, CompositePathSortOrder.DESCENDING),
        newCompositePath("/" + STRING_FIELD, CompositePathSortOrder.ASCENDING),
        newCompositePath("/" + BOOL_FIELD, CompositePathSortOrder.DESCENDING),
        newCompositePath("/" + NULL_FIELD, CompositePathSortOrder.ASCENDING))));
    // 4) Composite index over strings of varying lengths (sort order left at the
    //    CompositePath default, as the original code did).
    compositeIndexes.add(new ArrayList<>(Arrays.asList(
        newCompositePath("/" + STRING_FIELD, null),
        newCompositePath("/" + SHORT_STRING_FIELD, null),
        newCompositePath("/" + MEDIUM_STRING_FIELD, null),
        newCompositePath("/" + LONG_STRING_FIELD, null))));
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}
/**
 * Creates a CompositePath for {@code path}; a null {@code order} leaves the
 * CompositePath's default sort order untouched.
 */
private static CompositePath newCompositePath(String path, CompositePathSortOrder order) {
    CompositePath compositePath = new CompositePath();
    compositePath.setPath(path);
    if (order != null) {
        compositePath.setOrder(order);
    }
    return compositePath;
}
/**
 * Creates the container described by {@code collectionDefinition} in database
 * {@code dbId} and returns a reference to it.
 */
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
    CosmosAsyncDatabase targetDatabase = client.getDatabase(dbId);
    targetDatabase.createContainer(collectionDefinition).block();
    return targetDatabase.getContainer(collectionDefinition.getId());
}
/**
 * Blocking delete of a single container.
 */
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
    CosmosAsyncContainer target = client.getDatabase(dbId).getContainer(collectionId);
    target.delete().block();
}
/**
 * Creates {@code item} in the container and returns the stored document. A
 * one-hour end-to-end timeout keeps the client-side latency policy from
 * tripping in slow test environments.
 */
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
    CosmosEndToEndOperationLatencyPolicyConfig e2ePolicy =
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build();
    CosmosItemRequestOptions requestOptions =
        new CosmosItemRequestOptions().setCosmosEndToEndOperationLatencyPolicyConfig(e2ePolicy);
    return BridgeInternal.getProperties(cosmosContainer.createItem(item, requestOptions).block());
}
/**
 * Issues one create operation per document and merges them with the given
 * concurrency level. A one-hour end-to-end timeout keeps the latency policy
 * out of the way in tests.
 */
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                                  List<T> documentDefinitionList,
                                                  int concurrencyLevel) {
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions()
        .setCosmosEndToEndOperationLatencyPolicyConfig(
            new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
    List<Mono<CosmosItemResponse<T>>> createOperations = documentDefinitionList
        .stream()
        .map(docDef -> cosmosContainer.createItem(docDef, requestOptions))
        .collect(Collectors.toList());
    return Flux.merge(Flux.fromIterable(createOperations), concurrencyLevel);
}
/**
 * Inserts all documents with the default concurrency level and blocks,
 * returning the created items.
 */
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
                                      List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}
/**
 * Inserts all documents with the default concurrency level and blocks until
 * complete, discarding the responses.
 */
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    Flux<CosmosItemResponse<T>> inserts =
        bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL);
    inserts.publishOn(Schedulers.parallel()).then().block();
}
/**
 * Creates {@code userSettings} in the given database and returns a reference
 * to the created user.
 */
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
CosmosUserResponse userResponse = database.createUser(userSettings).block();
return database.getUser(userResponse.getProperties().getId());
}
/**
 * Deletes any pre-existing user with the same id, then (re)creates it.
 */
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
deleteUserIfExists(client, databaseId, user.getId());
return createUser(client, databaseId, user);
}
/**
 * Deletes any pre-existing container with the same id, then (re)creates it
 * with the given request options.
 */
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
deleteCollectionIfExists(client, databaseId, collection.getId());
return createCollection(client.getDatabase(databaseId), collection, options);
}
/**
 * Default container definition extended with an all-versions-and-deletes
 * change-feed policy (5 minute retention).
 */
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
return cosmosContainerProperties;
}
/**
 * Default container definition with a random id.
 */
static protected CosmosContainerProperties getCollectionDefinition() {
return getCollectionDefinition(UUID.randomUUID().toString());
}
/**
 * Container definition with a single "/mypk" partition key — the default shape
 * used by most tests.
 */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition();
    List<String> pkPaths = new ArrayList<>();
    pkPaths.add("/mypk");
    pkDefinition.setPaths(pkPaths);
    return new CosmosContainerProperties(collectionId, pkDefinition);
}
/**
 * Container definition using the caller-supplied partition key definition.
 */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
/**
 * Same "/mypk" shape but with the V2 partition key definition version (large
 * partition keys / hash V2).
 */
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition();
    List<String> pkPaths = new ArrayList<>();
    pkPaths.add("/mypk");
    pkDefinition.setPaths(pkPaths);
    pkDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    return new CosmosContainerProperties(collectionId, pkDefinition);
}
/**
 * Range-index container definition partitioned on "/id".
 */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}
/**
 * Range-index container definition partitioned on "/mypk".
 */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}
/**
 * Container definition (random id) whose indexing policy includes every path
 * ("/*") so queries on any property are served from the index.
 */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    PartitionKeyDefinition pkDefinition = new PartitionKeyDefinition();
    pkDefinition.setPaths(partitionKeyPath);
    List<IncludedPath> includedPathList = new ArrayList<>();
    includedPathList.add(new IncludedPath("/*"));
    IndexingPolicy policy = new IndexingPolicy();
    policy.setIncludedPaths(includedPathList);
    CosmosContainerProperties definition = new CosmosContainerProperties(UUID.randomUUID().toString(), pkDefinition);
    definition.setIndexingPolicy(policy);
    return definition;
}
/**
 * Deletes the container only when a container with the given id exists,
 * avoiding a 404 from an unconditional delete.
 */
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
CosmosAsyncDatabase database = client.getDatabase(databaseId);
// Read first so a missing database fails fast with a clear error.
database.read().block();
List<CosmosContainerProperties> res = database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
.collectList()
.block();
if (!res.isEmpty()) {
deleteCollection(database, collectionId);
}
}
/**
 * Blocking delete of a container by id.
 */
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
cosmosDatabase.getContainer(collectionId).delete().block();
}
/**
 * Blocking delete of the given container.
 */
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
cosmosContainer.delete().block();
}
/**
 * Deletes the document only when it exists. The query is scoped to the
 * partition whose key equals the document id (these tests store docs with
 * id == partition key for this helper).
 */
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setPartitionKey(new PartitionKey(docId));
CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
List<InternalObjectNode> res = cosmosContainer
.queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), options, InternalObjectNode.class)
.byPage()
.flatMap(page -> Flux.fromIterable(page.getResults()))
.collectList().block();
if (!res.isEmpty()) {
deleteDocument(cosmosContainer, docId);
}
}
/**
 * Best-effort document delete: a 404 (document already gone) is ignored, any
 * other failure propagates. No-op when container or id is null.
 */
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer == null || documentId == null) {
        return;
    }
    try {
        CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions()
            .setCosmosEndToEndOperationLatencyPolicyConfig(
                new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
        cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey), requestOptions).block();
    } catch (Exception e) {
        CosmosException cosmosException = Utils.as(e, CosmosException.class);
        boolean isNotFound = cosmosException != null && cosmosException.getStatusCode() == 404;
        if (!isNotFound) {
            throw e;
        }
    }
}
/**
 * Blocking delete of a document stored without a partition key
 * ({@code PartitionKey.NONE}).
 */
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions()
        .setCosmosEndToEndOperationLatencyPolicyConfig(
            new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE, requestOptions).block();
}
/**
 * Deletes the user only when a user with the given id exists.
 */
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // Read first so a missing database fails fast with a clear error.
    client.getDatabase(databaseId).read().block();
    List<CosmosUserProperties> matches = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList().block();
    if (!matches.isEmpty()) {
        deleteUser(database, userId);
    }
}
/**
 * Blocking delete of a user by id.
 */
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    database.getUser(userId).delete().block();
}
/**
 * Drops any pre-existing database with the same id, then recreates it.
 */
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/**
 * Creates a database with the given id and returns a reference to it.
 */
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties settings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(settings).block();
    return client.getDatabase(settings.getId());
}
/**
 * Creates a database with the sync client.
 *
 * @return a reference to the created database, or {@code null} when creation
 *         fails with a {@link CosmosException} (failure is logged).
 */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // Log through the shared test logger instead of printStackTrace so the
        // failure shows up alongside the rest of the test diagnostics.
        logger.error("failed to create sync database {}", databaseId, e);
    }
    return null;
}
/**
 * Returns the existing database with the given id when present (after a
 * verifying read), otherwise creates it.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> matches = client
        .queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
        .collectList()
        .block();
    if (!matches.isEmpty()) {
        CosmosAsyncDatabase existing = client.getDatabase(databaseId);
        // Verify the database is reachable before handing it back.
        existing.read().block();
        return existing;
    }
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/**
 * Best-effort database cleanup: a failed delete (e.g. the database is already
 * gone) must not fail the test, but it should not be silently invisible either,
 * so the swallowed exception is logged at warn level.
 */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            logger.warn("failed to delete database {}", database.getId(), e);
        }
    }
}
/**
 * Best-effort database cleanup with the sync client; failures are logged,
 * never thrown.
 */
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database == null) {
        return;
    }
    try {
        logger.info("attempting to delete database ....");
        database.delete();
        logger.info("database deletion completed");
    } catch (Exception e) {
        logger.error("failed to delete sync database", e);
    }
}
/**
 * Deletes every container in the database, blocking on each delete.
 */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    List<CosmosContainerProperties> allContainers = database.readAllContainers()
        .collectList()
        .block();
    for (CosmosContainerProperties containerProperties : allContainers) {
        database.getContainer(containerProperties.getId()).delete().block();
    }
}
/**
 * Best-effort container cleanup: a 404 (container already gone) is logged at
 * info level, any other failure at error level; nothing is rethrown.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection != null) {
        try {
            logger.info("attempting to delete container {}.{}....",
                collection.getDatabase().getId(),
                collection.getId());
            collection.delete().block();
            logger.info("Container {}.{} deletion completed",
                collection.getDatabase().getId(),
                collection.getId());
        } catch (Exception e) {
            boolean shouldLogAsError = true;
            if (e instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) e;
                if (cosmosException.getStatusCode() == 404) {
                    shouldLogAsError = false;
                    logger.info(
                        "Container {}.{} does not exist anymore.",
                        collection.getDatabase().getId(),
                        collection.getId());
                }
            }
            if (shouldLogAsError) {
                logger.error("failed to delete sync container {}.{}",
                    collection.getDatabase().getId(),
                    collection.getId(),
                    e);
            }
        } finally {
            // Short pause after the delete attempt. Do NOT throw from this
            // finally block: a RuntimeException here would mask any exception
            // already propagating from the try/catch above. Restore the
            // interrupt flag instead so callers can observe interruption.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }
}
/**
 * Best-effort delete of a container by id; any failure is swallowed.
 */
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database == null || collectionId == null) {
        return;
    }
    try {
        safeDeleteCollection(database.getContainer(collectionId));
    } catch (Exception ignored) {
        // intentionally ignored — "safe" cleanup must never fail the test
    }
}
/**
 * Closes the client on a background thread so teardown does not block the
 * caller; close failures are logged.
 */
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    Thread closer = new Thread(() -> {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    });
    closer.start();
}
/**
 * Closes the async client in-line; close failures are logged, never thrown.
 */
static protected void safeClose(CosmosAsyncClient client) {
    if (client == null) {
        return;
    }
    try {
        client.close();
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/**
 * Closes the sync client in-line; close failures are logged, never thrown.
 */
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client == null) {
        return;
    }
    try {
        logger.info("closing client ...");
        client.close();
        logger.info("closing client completed");
    } catch (Exception e) {
        logger.error("failed to close client", e);
    }
}
/**
 * Subscribes to {@code single} and asserts it completes successfully with
 * exactly one value, which is then checked by {@code validator}. Uses the
 * default subscriber validation timeout.
 */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
validateSuccess(single, validator, subscriberValidationTimeout);
}
/**
 * Same as {@link #validateSuccess(Mono, CosmosResponseValidator)} with an
 * explicit timeout in milliseconds.
 */
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
validateSuccess(single.flux(), validator, timeout);
}
/**
 * Core success validation: blocks up to {@code timeout} ms for a terminal
 * event, asserts no errors, completion and exactly one emitted value, then
 * runs the validator against that value.
 */
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
CosmosResponseValidator<T> validator, long timeout) {
TestSubscriber<T> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
/**
 * Asserts the Mono terminates with an error within the default timeout and
 * validates that error.
 */
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
throws InterruptedException {
validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
/**
 * Core failure validation: waits for a terminal event, asserts the stream
 * errored (did not complete) with exactly one error, and validates it.
 * getEvents().get(1) is the subscriber's error-notification list (RxJava
 * TestSubscriber convention).
 */
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
FailureValidator validator, long timeout) throws InterruptedException {
TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
/**
 * Asserts the item response Mono completes successfully with exactly one
 * value and validates it, using the default subscriber timeout.
 */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
Mono<T> responseMono, CosmosItemResponseValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
testSubscriber.assertValueCount(1);
validator.validate(testSubscriber.values().get(0));
}
/**
 * Asserts the item response Mono terminates with exactly one error and
 * validates it. getEvents().get(1) is the subscriber's error-notification
 * list (RxJava TestSubscriber convention).
 */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
Mono<T> responseMono, FailureValidator validator) {
TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
responseMono.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.errors()).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
/**
 * Validates that the query feed completes successfully, using the default
 * subscriber timeout.
 */
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
FeedResponseListValidator<T> validator) {
validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}
/**
 * Core query-success validation: waits for completion without errors and
 * validates the full list of received pages.
 */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
FeedResponseListValidator<T> validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
validator.validate(testSubscriber.values());
}
/**
 * Runs the query once per page size, draining it page-by-page via
 * continuation tokens, and validates the collected pages each time.
 */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
String query,
CosmosAsyncContainer container,
int[] pageSizes,
FeedResponseListValidator<T> validator,
Class<T> classType) {
for (int pageSize : pageSizes) {
List<FeedResponse<T>> receivedDocuments = queryWithContinuationTokens(query, container, pageSize, classType);
validator.validate(receivedDocuments);
}
}
/**
 * Drains a query one page at a time: each iteration requests a single page
 * with the previous page's continuation token, until the token comes back
 * null (no more results).
 *
 * @return every page received, in order.
 */
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
String query,
CosmosAsyncContainer container,
int pageSize,
Class<T> classType) {
String requestContinuation = null;
List<String> continuationTokens = new ArrayList<String>();
List<FeedResponse<T>> responseList = new ArrayList<>();
do {
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
// Request exactly one page starting at the previous continuation token.
queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
testSubscriber.assertNoErrors();
testSubscriber.assertComplete();
@SuppressWarnings("unchecked")
FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
requestContinuation = firstPage.getContinuationToken();
responseList.add(firstPage);
continuationTokens.add(requestContinuation);
} while (requestContinuation != null);
return responseList;
}
/**
 * Validates that the query feed terminates with a failure, using the default
 * subscriber timeout.
 */
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}
/**
 * Core query-failure validation: waits for a terminal event, asserts the
 * stream errored with exactly one error, and validates it.
 */
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
FailureValidator validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
/** Default provider: a single Gateway-mode builder with SESSION consistency. */
@DataProvider
public static Object[][] clientBuilders() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
/** Gateway-only provider with SESSION consistency. */
@DataProvider
public static Object[][] clientBuildersWithGateway() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
/** Direct (TCP) and Gateway builders, both at SESSION consistency. */
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
};
}
/** Direct (TCP) builder only, at SESSION consistency. */
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
};
}
/**
 * Parses an UpperCamel consistency name (e.g. "BoundedStaleness") into a
 * {@link ConsistencyLevel}; a null input fails fast with IllegalStateException.
 */
static ConsistencyLevel parseConsistency(String consistency) {
    if (consistency == null) {
        logger.error("INVALID configured test consistency [{}].", consistency);
        throw new IllegalStateException("INVALID configured test consistency " + consistency);
    }
    String normalized = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
    return ConsistencyLevel.valueOf(normalized);
}
/**
 * Parses the configured preferred locations (a JSON string array) into a list.
 *
 * @return the parsed list, or {@code null} when the input is empty/unset.
 * @throws IllegalStateException when the value is not valid JSON, with the
 *         parse failure preserved as the cause.
 */
static List<String> parsePreferredLocation(String preferredLocations) {
    if (StringUtils.isEmpty(preferredLocations)) {
        return null;
    }
    try {
        return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
        });
    } catch (Exception e) {
        logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
        // Preserve the parse failure as the cause instead of discarding it.
        throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations, e);
    }
}
/**
 * Parses the configured protocols (a JSON array of UpperCamel protocol names)
 * into {@link Protocol} values.
 *
 * @return the parsed list, or {@code null} when the input is empty/unset.
 * @throws IllegalStateException when the value cannot be parsed, with the
 *         underlying failure preserved as the cause.
 */
static List<Protocol> parseProtocols(String protocols) {
    if (StringUtils.isEmpty(protocols)) {
        return null;
    }
    List<Protocol> protocolList = new ArrayList<>();
    try {
        List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
        });
        for (String protocol : protocolStrings) {
            protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
        }
        return protocolList;
    } catch (Exception e) {
        logger.error("INVALID configured test protocols [{}].", protocols);
        // Preserve the parse failure as the cause instead of discarding it.
        throw new IllegalStateException("INVALID configured test protocols " + protocols, e);
    }
}
/** Direct builders (all configured protocols) plus Gateway, EVENTUAL consistency. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
    return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
/** Direct (HTTPS) plus Gateway builders. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
/** Direct (TCP) plus Gateway builders. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
    return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
/** Direct (TCP) builders only — no Gateway entry. */
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
    return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
/** Direct (TCP) plus Gateway builders with contentResponseOnWrite disabled. */
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
    // Previously this passed (false, true, true, TCP) — i.e. no gateway and
    // contentResponseOnWrite ENABLED, a copy-paste of simpleClientBuildersWithJustDirectTcp
    // that contradicted this provider's name. Pass contentResponseOnWriteEnabled=false,
    // matching clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled below.
    return simpleClientBuildersWithDirect(true, false, true, Protocol.TCP);
}
/** Direct (TCP) and Gateway builders with throttled-request retries disabled. */
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
    return new Object[][]{
        { createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
        { createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
    };
}
/**
 * Convenience overload: always includes the Gateway builder and keeps
 * throttled-request retries enabled.
 */
private static Object[][] simpleClientBuildersWithDirect(
boolean contentResponseOnWriteEnabled,
Protocol... protocols) {
return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
/**
 * Builds one Direct-mode client builder per requested protocol at EVENTUAL
 * consistency, optionally appending a SESSION Gateway builder, and wraps each
 * builder as a one-element TestNG data-provider row.
 */
private static Object[][] simpleClientBuildersWithDirect(
boolean includeGateway,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
// The "simple" providers only exercise EVENTUAL consistency.
List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
// Log the effective configuration of every builder for diagnosis.
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
if (includeGateway) {
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
false,
null,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
}
return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
/** Direct builders across all desired consistencies, for every configured protocol. */
@DataProvider
public static Object[][] clientBuildersWithDirect() {
return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
/** Direct (HTTPS) builders across all desired consistencies. */
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
/** Direct (TCP) builders across all desired consistencies. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
/** Direct (TCP) builders across all desired consistencies, contentResponseOnWrite disabled. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
/**
 * Combines the contentResponseOnWrite-disabled and -enabled Direct/SESSION
 * builder sets into a single provider array (disabled entries first).
 */
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
    Object[][] withDisabled = clientBuildersWithDirectSession(false, true, Protocol.TCP);
    Object[][] withEnabled = clientBuildersWithDirectSession(true, true, Protocol.TCP);
    Object[][] combined = new Object[withDisabled.length + withEnabled.length][];
    // System.arraycopy replaces the original pair of hand-written index loops.
    System.arraycopy(withDisabled, 0, combined, 0, withDisabled.length);
    System.arraycopy(withEnabled, 0, combined, withDisabled.length, withEnabled.length);
    return combined;
}
/** Direct SESSION builders for every configured protocol, plus Gateway. */
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
/**
 * Same as {@link #clientBuildersWithDirectSession()} but with one extra
 * Gateway builder pointed at the compute-gateway emulator port.
 */
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
Object[][] originalProviders = clientBuildersWithDirectSession(
true,
true,
toArray(protocols));
List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
Object[] injectedProviderParameters = new Object[1];
// Swap the routing-gateway port in the host URL for the compute-gateway port.
CosmosClientBuilder builder = createGatewayRxDocumentClient(
TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
ConsistencyLevel.SESSION,
false,
null,
true,
true);
injectedProviderParameters[0] = builder;
providers.add(injectedProviderParameters);
Object[][] array = new Object[providers.size()][];
return providers.toArray(array);
}
/** Direct (TCP) SESSION builders plus Gateway. */
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
/** No protocols requested, so only the Gateway SESSION builder is produced. */
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
return clientBuildersWithDirectSession(true, true);
}
/**
 * Converts the protocol list to an array. Passing a zero-length array is the
 * preferred {@code Collection.toArray} idiom.
 */
static Protocol[] toArray(List<Protocol> protocols) {
    return protocols.toArray(new Protocol[0]);
}
/**
 * Direct builders restricted to SESSION consistency (plus the Gateway builder
 * appended by clientBuildersWithDirect).
 */
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
    // Avoid the double-brace-initialization anti-pattern (it creates an
    // anonymous ArrayList subclass per call); the consistency list is only
    // iterated downstream, so a singleton list suffices.
    return clientBuildersWithDirect(
        Collections.singletonList(ConsistencyLevel.SESSION),
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests,
        protocols);
}
/**
 * Direct builders for every consistency level in the configured
 * desiredConsistencies list (plus the Gateway builder appended downstream).
 */
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
/**
 * Parses the configured desired consistencies (a JSON array of UpperCamel
 * consistency names) into {@link ConsistencyLevel} values.
 *
 * @return the parsed list, or {@code null} when the input is empty/unset.
 * @throws IllegalStateException when the value cannot be parsed, with the
 *         underlying failure preserved as the cause.
 */
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
    if (StringUtils.isEmpty(consistencies)) {
        return null;
    }
    List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
    try {
        List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
        for (String consistency : consistencyStrings) {
            consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
        }
        return consistencyLevels;
    } catch (Exception e) {
        logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
        // Preserve the parse failure as the cause instead of discarding it.
        throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies, e);
    }
}
/**
 * Returns the given consistency level and every weaker level, strongest
 * first. The switch deliberately falls through: entering at the account's
 * level accumulates that case and all cases below it.
 */
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
/**
 * Produces the TestNG parameter matrix: one direct-mode builder per
 * (protocol, consistency) combination, plus a single gateway-mode SESSION builder.
 *
 * @param testConsistencies consistency levels to cover in direct mode
 * @param contentResponseOnWriteEnabled whether write responses carry the resource payload
 * @param retryOnThrottledRequests whether 429s are retried by the client
 * @param protocols direct-mode transport protocols; when empty only the gateway builder is emitted
 */
private static Object[][] clientBuildersWithDirect(
    List<ConsistencyLevel> testConsistencies,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests,
    Protocol... protocols) {
    // Multi-master only applies when preferred locations are configured and the
    // account-level consistency is SESSION.
    boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
    List<CosmosClientBuilder> builders = new ArrayList<>();
    for (Protocol protocol : protocols) {
        for (ConsistencyLevel consistencyLevel : testConsistencies) {
            builders.add(createDirectRxDocumentClient(
                consistencyLevel,
                protocol,
                isMultiMasterEnabled,
                preferredLocations,
                contentResponseOnWriteEnabled,
                retryOnThrottledRequests));
        }
    }
    for (CosmosClientBuilder builder : builders) {
        logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
            CosmosBridgeInternal.getConnectionPolicy(builder).getConnectionMode(),
            CosmosBridgeInternal.getConsistencyLevel(builder),
            extractConfigs(builder).getProtocol()
        );
    }
    builders.add(
        createGatewayRxDocumentClient(
            ConsistencyLevel.SESSION,
            isMultiMasterEnabled,
            preferredLocations,
            contentResponseOnWriteEnabled,
            retryOnThrottledRequests));
    // Each builder becomes its own single-element parameter row.
    return builders.stream().map(builder -> new Object[] { builder }).toArray(Object[][]::new);
}
/**
 * Gateway-mode builder used by suite setup/teardown: SESSION consistency with a
 * long maximum throttling-retry wait (SUITE_SETUP_TIMEOUT seconds) so housekeeping
 * operations survive sustained 429s.
 */
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
    ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions();
    retryOptions.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
    return new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .credential(credential)
        .gatewayMode(new GatewayConnectionConfig())
        .throttlingRetryOptions(retryOptions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(ConsistencyLevel.SESSION);
}
// Gateway-mode builder against the default test host; delegates to the overload
// that also takes an explicit endpoint.
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    return createGatewayRxDocumentClient(
        TestConfigurations.HOST,
        consistencyLevel,
        multiMasterEnabled,
        preferredRegions,
        contentResponseOnWriteEnabled,
        retryOnThrottledRequests);
}
/**
 * Creates a gateway-mode client builder against the given endpoint.
 *
 * @param endpoint service endpoint to connect to
 * @param consistencyLevel consistency level for the client
 * @param multiMasterEnabled whether multiple write regions are enabled
 * @param preferredRegions preferred region list, may be null
 * @param contentResponseOnWriteEnabled whether write responses carry the resource payload
 * @param retryOnThrottledRequests when false, throttled requests (429) are not retried
 */
static protected CosmosClientBuilder createGatewayRxDocumentClient(
    String endpoint,
    ConsistencyLevel consistencyLevel,
    boolean multiMasterEnabled,
    List<String> preferredRegions,
    boolean contentResponseOnWriteEnabled,
    boolean retryOnThrottledRequests) {
    CosmosClientBuilder clientBuilder = new CosmosClientBuilder()
        .endpoint(endpoint)
        .credential(credential)
        .gatewayMode(new GatewayConnectionConfig())
        .multipleWriteRegionsEnabled(multiMasterEnabled)
        .preferredRegions(preferredRegions)
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    // Materialize the connection policy through the internal accessor before the
    // throttling options are (possibly) overridden below.
    ImplementationBridgeHelpers
        .CosmosClientBuilderHelper
        .getCosmosClientBuilderAccessor()
        .buildConnectionPolicy(clientBuilder);
    if (!retryOnThrottledRequests) {
        // Zero retry attempts lets throttling tests observe 429s directly.
        clientBuilder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    return clientBuilder;
}
// Default gateway builder: SESSION consistency, single-master, no preferred regions,
// content response enabled, throttling retries enabled.
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
    return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
// Creates a direct-mode client builder. The transport protocol is injected by
// stubbing a spied Configs instance (see the doAnswer below), since the protocol
// is read through Configs rather than exposed on the public builder API.
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
                                                                  Protocol protocol,
                                                                  boolean multiMasterEnabled,
                                                                  List<String> preferredRegions,
                                                                  boolean contentResponseOnWriteEnabled,
                                                                  boolean retryOnThrottledRequests) {
    CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
        .credential(credential)
        .directMode(DirectConnectionConfig.getDefaultConfig())
        .contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
        .consistencyLevel(consistencyLevel);
    if (preferredRegions != null) {
        builder.preferredRegions(preferredRegions);
    }
    // Multi-write is only turned on for SESSION consistency — presumably because the
    // tests never combine multi-master with stronger levels; TODO confirm.
    if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
        builder.multipleWriteRegionsEnabled(true);
    }
    if (!retryOnThrottledRequests) {
        builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
    }
    // Spy Configs so getProtocol() returns the requested protocol, then inject it.
    Configs configs = spy(new Configs());
    doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
    return injectConfigs(builder, configs);
}
/**
 * Computes how many pages a query is expected to return:
 * ceil(totalExpectedResult / maxPageSize), but never fewer than one page
 * (an empty result set still yields one empty page).
 *
 * @param totalExpectedResult total number of expected items; may be zero
 * @param maxPageSize maximum items per page; must be positive
 * @return expected page count, always >= 1
 * @throws IllegalArgumentException if maxPageSize is not positive
 */
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
    if (maxPageSize <= 0) {
        // Fail fast with a clear message instead of an ArithmeticException (division
        // by zero) or a nonsensical negative page count.
        throw new IllegalArgumentException("maxPageSize must be positive but was " + maxPageSize);
    }
    return Math.max((totalExpectedResult + maxPageSize - 1) / maxPageSize, 1);
}
// TestNG data provider: runs each query-metrics test with metrics explicitly
// enabled, explicitly disabled, and unset (null).
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
    Object[] metricsEnabled = { Boolean.TRUE };
    Object[] metricsDisabled = { Boolean.FALSE };
    Object[] metricsUnset = { null };
    return new Object[][] { metricsEnabled, metricsDisabled, metricsUnset };
}
// TestNG data provider: DISTINCT ORDER BY query variants paired with a boolean flag
// (NOTE(review): flag semantics are defined by the consuming test — confirm there).
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
    return new Object[][]{
        { "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
        { "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
    };
}
// Clones a client builder via the internal bridge so tests can mutate the copy
// without affecting the original.
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
    return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
/**
 * Decodes a hex string (e.g. "0AFF") into its raw bytes, two hex digits per byte.
 *
 * @param string hex characters; length must be even
 * @return the decoded bytes
 * @throws IllegalArgumentException if the string has odd length
 * @throws NumberFormatException if a character pair is not valid hexadecimal
 */
public byte[] decodeHexString(String string) {
    if (string.length() % 2 != 0) {
        // An odd length previously surfaced as an opaque StringIndexOutOfBoundsException.
        throw new IllegalArgumentException("hex string must have even length but was " + string.length());
    }
    byte[] bytes = new byte[string.length() / 2];
    for (int i = 0; i < bytes.length; i++) {
        // The narrowing cast keeps only the low 8 bits — same result as the previous
        // ByteArrayOutputStream.write(int), without the stream allocation.
        bytes[i] = (byte) Integer.parseInt(string.substring(2 * i, 2 * i + 2), 16);
    }
    return bytes;
}
} | options.setCosmosEndToEndOperationLatencyPolicyConfig( | protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) {
// Empties a container in place: deletes every document, trigger, stored procedure
// and UDF, leaving the container itself (and its indexing policy) intact.
// Read the container definition to learn its id and partition key path(s).
CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties();
String cosmosContainerId = cosmosContainerProperties.getId();
logger.info("Truncating collection {} ...", cosmosContainerId);
List<String> paths = cosmosContainerProperties.getPartitionKeyDefinition().getPaths();
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
// Very long (1h) end-to-end timeout so bulk cleanup is never cancelled client-side.
options.setCosmosEndToEndOperationLatencyPolicyConfig(
    new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
        .build()
);
options.setMaxDegreeOfParallelism(-1);
int maxItemCount = 100;
// Phase 1: delete all documents, resolving each document's partition key value
// from the container's (first) partition key path.
logger.info("Truncating collection {} documents ...", cosmosContainer.getId());
cosmosContainer.queryItems("SELECT * FROM root", options, InternalObjectNode.class)
    .byPage(maxItemCount)
    .publishOn(Schedulers.parallel())
    .flatMap(page -> Flux.fromIterable(page.getResults()))
    .flatMap(doc -> {
        PartitionKey partitionKey = null;
        Object propertyValue = null;
        if (paths != null && !paths.isEmpty()) {
            List<String> pkPath = PathParser.getPathParts(paths.get(0));
            propertyValue = ModelBridgeInternal.getObjectByPathFromJsonSerializable(doc, pkPath);
            // Documents with no value at the partition key path are addressed
            // with PartitionKey.NONE.
            if (propertyValue == null) {
                partitionKey = PartitionKey.NONE;
            } else {
                partitionKey = new PartitionKey(propertyValue);
            }
        } else {
            partitionKey = new PartitionKey(null);
        }
        return cosmosContainer.deleteItem(doc.getId(), partitionKey);
    }).then().block();
// Phase 2: delete all triggers.
logger.info("Truncating collection {} triggers ...", cosmosContainerId);
cosmosContainer.getScripts().queryTriggers("SELECT * FROM root", options)
    .byPage(maxItemCount)
    .publishOn(Schedulers.parallel())
    .flatMap(page -> Flux.fromIterable(page.getResults()))
    .flatMap(trigger -> {
        return cosmosContainer.getScripts().getTrigger(trigger.getId()).delete();
    }).then().block();
// Phase 3: delete all stored procedures.
logger.info("Truncating collection {} storedProcedures ...", cosmosContainerId);
cosmosContainer.getScripts().queryStoredProcedures("SELECT * FROM root", options)
    .byPage(maxItemCount)
    .publishOn(Schedulers.parallel())
    .flatMap(page -> Flux.fromIterable(page.getResults()))
    .flatMap(storedProcedure -> {
        return cosmosContainer.getScripts().getStoredProcedure(storedProcedure.getId()).delete(new CosmosStoredProcedureRequestOptions());
    }).then().block();
// Phase 4: delete all user-defined functions.
logger.info("Truncating collection {} udfs ...", cosmosContainerId);
cosmosContainer.getScripts().queryUserDefinedFunctions("SELECT * FROM root", options)
    .byPage(maxItemCount)
    .publishOn(Schedulers.parallel())
    .flatMap(page -> Flux.fromIterable(page.getResults()))
    .flatMap(udf -> {
        return cosmosContainer.getScripts().getUserDefinedFunction(udf.getId()).delete();
    }).then().block();
logger.info("Finished truncating collection {}.", cosmosContainerId);
}
/**
 * For weak consistency levels (EVENTUAL / CONSISTENT_PREFIX) waits a fixed period
 * so replicas can catch up before the test validates reads; a no-op for SESSION,
 * BOUNDED_STALENESS and STRONG.
 */
protected static void waitIfNeededForReplicasToCatchUp(CosmosClientBuilder clientBuilder) {
    switch (CosmosBridgeInternal.getConsistencyLevel(clientBuilder)) {
        case EVENTUAL:
        case CONSISTENT_PREFIX:
            logger.info(" additional wait in EVENTUAL mode so the replica catch up");
            try {
                TimeUnit.MILLISECONDS.sleep(WAIT_REPLICA_CATCH_UP_IN_MILLIS);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can still observe cancellation
                // (previously the interruption was swallowed entirely).
                Thread.currentThread().interrupt();
                logger.error("unexpected failure", e);
            }
            break;
        default:
            // SESSION / BOUNDED_STALENESS / STRONG need no extra wait.
            break;
    }
}
/**
 * Creates a container with manual throughput and returns a handle to it.
 * For throughput above 6000 RU (presumably multi-partition — TODO confirm) or
 * multi-region accounts, sleeps briefly so provisioning/replication can settle.
 */
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options, int throughput) {
    database.createContainer(cosmosContainerProperties, ThroughputProperties.createManualThroughput(throughput), options).block();
    CosmosAsyncClient client = ImplementationBridgeHelpers
        .CosmosAsyncDatabaseHelper
        .getCosmosAsyncDatabaseAccessor()
        .getCosmosAsyncClient(database);
    boolean isMultiRegional = ImplementationBridgeHelpers
        .CosmosAsyncClientHelper
        .getCosmosAsyncClientAccessor()
        .getPreferredRegions(client).size() > 1;
    if (throughput > 6000 || isMultiRegional) {
        try {
            Thread.sleep(3000);
        } catch (InterruptedException e) {
            // Re-assert the interrupt before propagating so the thread's interrupted
            // status is not silently discarded (previously it was lost).
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    return database.getContainer(cosmosContainerProperties.getId());
}
// Creates a container without specifying throughput and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncDatabase database, CosmosContainerProperties cosmosContainerProperties,
                                                    CosmosContainerRequestOptions options) {
    database.createContainer(cosmosContainerProperties, options).block();
    return database.getContainer(cosmosContainerProperties.getId());
}
/**
 * Builds a container definition (random id, "/pk" partition key) whose indexing
 * policy carries four composite indexes: a simple two-column index, a four-column
 * index, a mixed-primitive-type index and a varying-length-strings index with
 * default sort order.
 * NOTE(review): despite the method name, no spatial indexes are configured here —
 * confirm whether they were intended.
 */
private static CosmosContainerProperties getCollectionDefinitionMultiPartitionWithCompositeAndSpatialIndexes() {
    final String NUMBER_FIELD = "numberField";
    final String STRING_FIELD = "stringField";
    final String NUMBER_FIELD_2 = "numberField2";
    final String STRING_FIELD_2 = "stringField2";
    final String BOOL_FIELD = "boolField";
    final String NULL_FIELD = "nullField";
    final String SHORT_STRING_FIELD = "shortStringField";
    final String MEDIUM_STRING_FIELD = "mediumStringField";
    final String LONG_STRING_FIELD = "longStringField";
    final String PARTITION_KEY = "pk";
    // (The previously declared OBJECT_FIELD / ARRAY_FIELD constants were never used
    // and have been removed.)

    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    ArrayList<String> partitionKeyPaths = new ArrayList<String>();
    partitionKeyPaths.add("/" + PARTITION_KEY);
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    CosmosContainerProperties cosmosContainerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);

    // Composite index 1: simple two-column index.
    List<CompositePath> simpleIndex = new ArrayList<>();
    simpleIndex.add(makeCompositePath(NUMBER_FIELD, CompositePathSortOrder.ASCENDING));
    simpleIndex.add(makeCompositePath(STRING_FIELD, CompositePathSortOrder.DESCENDING));

    // Composite index 2: four columns.
    List<CompositePath> maxColumnsIndex = new ArrayList<>();
    maxColumnsIndex.add(makeCompositePath(NUMBER_FIELD, CompositePathSortOrder.DESCENDING));
    maxColumnsIndex.add(makeCompositePath(STRING_FIELD, CompositePathSortOrder.ASCENDING));
    maxColumnsIndex.add(makeCompositePath(NUMBER_FIELD_2, CompositePathSortOrder.DESCENDING));
    maxColumnsIndex.add(makeCompositePath(STRING_FIELD_2, CompositePathSortOrder.ASCENDING));

    // Composite index 3: mixed primitive value types (number/string/bool/null).
    List<CompositePath> primitiveValuesIndex = new ArrayList<>();
    primitiveValuesIndex.add(makeCompositePath(NUMBER_FIELD, CompositePathSortOrder.DESCENDING));
    primitiveValuesIndex.add(makeCompositePath(STRING_FIELD, CompositePathSortOrder.ASCENDING));
    primitiveValuesIndex.add(makeCompositePath(BOOL_FIELD, CompositePathSortOrder.DESCENDING));
    primitiveValuesIndex.add(makeCompositePath(NULL_FIELD, CompositePathSortOrder.ASCENDING));

    // Composite index 4: string columns of varying length; order left unset so the
    // service default applies (matching the original behavior).
    List<CompositePath> longStringsIndex = new ArrayList<>();
    longStringsIndex.add(makeCompositePath(STRING_FIELD, null));
    longStringsIndex.add(makeCompositePath(SHORT_STRING_FIELD, null));
    longStringsIndex.add(makeCompositePath(MEDIUM_STRING_FIELD, null));
    longStringsIndex.add(makeCompositePath(LONG_STRING_FIELD, null));

    List<List<CompositePath>> compositeIndexes = new ArrayList<>();
    compositeIndexes.add(simpleIndex);
    compositeIndexes.add(maxColumnsIndex);
    compositeIndexes.add(primitiveValuesIndex);
    compositeIndexes.add(longStringsIndex);

    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setCompositeIndexes(compositeIndexes);
    cosmosContainerProperties.setIndexingPolicy(indexingPolicy);
    return cosmosContainerProperties;
}

/** Creates a CompositePath for "/fieldName"; a null order leaves the sort order unset. */
private static CompositePath makeCompositePath(String fieldName, CompositePathSortOrder order) {
    CompositePath path = new CompositePath();
    path.setPath("/" + fieldName);
    if (order != null) {
        path.setOrder(order);
    }
    return path;
}
// Creates a collection with default options and returns a handle to it.
public static CosmosAsyncContainer createCollection(CosmosAsyncClient client, String dbId, CosmosContainerProperties collectionDefinition) {
    CosmosAsyncDatabase database = client.getDatabase(dbId);
    database.createContainer(collectionDefinition).block();
    return database.getContainer(collectionDefinition.getId());
}
// Deletes a collection by database and collection id (fails if it does not exist).
public static void deleteCollection(CosmosAsyncClient client, String dbId, String collectionId) {
    client.getDatabase(dbId).getContainer(collectionId).delete().block();
}
/**
 * Creates the given document and returns the service-side properties.
 * A very long (1h) end-to-end timeout prevents client-side cancellation in slow
 * test environments.
 */
public static InternalObjectNode createDocument(CosmosAsyncContainer cosmosContainer, InternalObjectNode item) {
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    requestOptions.setCosmosEndToEndOperationLatencyPolicyConfig(
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
    return BridgeInternal.getProperties(cosmosContainer.createItem(item, requestOptions).block());
}
/**
 * Issues a createItem call for every document and merges the resulting Monos
 * with the given concurrency level.
 *
 * @param cosmosContainer target container
 * @param documentDefinitionList documents to create
 * @param concurrencyLevel maximum number of in-flight creates
 * @return a flux emitting one create response per document
 */
public <T> Flux<CosmosItemResponse<T>> bulkInsert(CosmosAsyncContainer cosmosContainer,
                                                  List<T> documentDefinitionList,
                                                  int concurrencyLevel) {
    CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
    // Long (1h) end-to-end timeout so bulk work is never cancelled client-side.
    requestOptions.setCosmosEndToEndOperationLatencyPolicyConfig(
        new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
    List<Mono<CosmosItemResponse<T>>> pendingCreates = new ArrayList<>(documentDefinitionList.size());
    for (T documentDefinition : documentDefinitionList) {
        pendingCreates.add(cosmosContainer.createItem(documentDefinition, requestOptions));
    }
    return Flux.merge(Flux.fromIterable(pendingCreates), concurrencyLevel);
}
// Blocking convenience wrapper around bulkInsert: waits for all creates and
// returns the created items.
public <T> List<T> bulkInsertBlocking(CosmosAsyncContainer cosmosContainer,
                                      List<T> documentDefinitionList) {
    return bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .map(CosmosItemResponse::getItem)
        .collectList()
        .block();
}
// Blocking fire-and-forget bulk insert: waits for completion, discards responses.
public <T> void voidBulkInsertBlocking(CosmosAsyncContainer cosmosContainer, List<T> documentDefinitionList) {
    bulkInsert(cosmosContainer, documentDefinitionList, DEFAULT_BULK_INSERT_CONCURRENCY_LEVEL)
        .publishOn(Schedulers.parallel())
        .then()
        .block();
}
// Creates a user in the database and returns a handle to it.
public static CosmosAsyncUser createUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties userSettings) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    CosmosUserResponse userResponse = database.createUser(userSettings).block();
    return database.getUser(userResponse.getProperties().getId());
}
// Deletes any pre-existing user with the same id, then creates it fresh.
public static CosmosAsyncUser safeCreateUser(CosmosAsyncClient client, String databaseId, CosmosUserProperties user) {
    deleteUserIfExists(client, databaseId, user.getId());
    return createUser(client, databaseId, user);
}
// Deletes any pre-existing collection with the same id, then creates it fresh.
private static CosmosAsyncContainer safeCreateCollection(CosmosAsyncClient client, String databaseId, CosmosContainerProperties collection, CosmosContainerRequestOptions options) {
    deleteCollectionIfExists(client, databaseId, collection.getId());
    return createCollection(client.getDatabase(databaseId), collection, options);
}
// Container definition with the all-versions-and-deletes ("full fidelity")
// change feed policy and a 5-minute retention window.
static protected CosmosContainerProperties getCollectionDefinitionWithFullFidelity() {
    CosmosContainerProperties cosmosContainerProperties = getCollectionDefinition(UUID.randomUUID().toString());
    cosmosContainerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createAllVersionsAndDeletesPolicy(Duration.ofMinutes(5)));
    return cosmosContainerProperties;
}
// Container definition with a random id and the default "/mypk" partition key.
static protected CosmosContainerProperties getCollectionDefinition() {
    return getCollectionDefinition(UUID.randomUUID().toString());
}
/**
 * Container definition with the given id and a single "/mypk" partition key path.
 */
static protected CosmosContainerProperties getCollectionDefinition(String collectionId) {
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/mypk");
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Container definition with an explicitly supplied partition key definition.
static protected CosmosContainerProperties getCollectionDefinition(String collectionId, PartitionKeyDefinition partitionKeyDefinition) {
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
/**
 * Container definition with a "/mypk" partition key using partition key
 * definition version V2 (hash V2).
 */
static protected CosmosContainerProperties getCollectionDefinitionForHashV2(String collectionId) {
    ArrayList<String> partitionKeyPaths = new ArrayList<>();
    partitionKeyPaths.add("/mypk");
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPaths);
    partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    return new CosmosContainerProperties(collectionId, partitionKeyDefinition);
}
// Range-indexed container definition partitioned on "/id".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndexWithIdAsPartitionKey() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/id"));
}
// Range-indexed container definition partitioned on "/mypk".
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex() {
    return getCollectionDefinitionWithRangeRangeIndex(Collections.singletonList("/mypk"));
}
/**
 * Container definition (random id) whose indexing policy explicitly includes all
 * paths ("/*"), using the given partition key path(s).
 */
static protected CosmosContainerProperties getCollectionDefinitionWithRangeRangeIndex(List<String> partitionKeyPath) {
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setPaths(partitionKeyPath);
    List<IncludedPath> includedPaths = new ArrayList<>();
    includedPaths.add(new IncludedPath("/*"));
    IndexingPolicy indexingPolicy = new IndexingPolicy();
    indexingPolicy.setIncludedPaths(includedPaths);
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDefinition);
    containerProperties.setIndexingPolicy(indexingPolicy);
    return containerProperties;
}
/**
 * Deletes the collection only when it currently exists in the database.
 */
public static void deleteCollectionIfExists(CosmosAsyncClient client, String databaseId, String collectionId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // Read first so a missing database fails here rather than in the query.
    database.read().block();
    List<CosmosContainerProperties> matches =
        database.queryContainers(String.format("SELECT * FROM root r where r.id = '%s'", collectionId), null)
            .collectList()
            .block();
    if (!matches.isEmpty()) {
        deleteCollection(database, collectionId);
    }
}
// Deletes a collection by id (fails if it does not exist).
public static void deleteCollection(CosmosAsyncDatabase cosmosDatabase, String collectionId) {
    cosmosDatabase.getContainer(collectionId).delete().block();
}
// Deletes the given collection (fails if it does not exist).
public static void deleteCollection(CosmosAsyncContainer cosmosContainer) {
    cosmosContainer.delete().block();
}
/**
 * Deletes the document only when it exists. Queries with the document id as the
 * partition key value (NOTE(review): only finds the document when its partition
 * key value equals its id — confirm at call sites).
 */
public static void deleteDocumentIfExists(CosmosAsyncClient client, String databaseId, String collectionId, String docId) {
    CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions();
    queryOptions.setPartitionKey(new PartitionKey(docId));
    CosmosAsyncContainer cosmosContainer = client.getDatabase(databaseId).getContainer(collectionId);
    List<InternalObjectNode> matches = cosmosContainer
        .queryItems(String.format("SELECT * FROM root r where r.id = '%s'", docId), queryOptions, InternalObjectNode.class)
        .byPage()
        .flatMap(page -> Flux.fromIterable(page.getResults()))
        .collectList()
        .block();
    if (!matches.isEmpty()) {
        deleteDocument(cosmosContainer, docId);
    }
}
/**
 * Best-effort document delete: a 404 (already gone) is tolerated, any other
 * failure is rethrown. No-op when container or id is null.
 */
public static void safeDeleteDocument(CosmosAsyncContainer cosmosContainer, String documentId, Object partitionKey) {
    if (cosmosContainer == null || documentId == null) {
        return;
    }
    try {
        CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions();
        requestOptions.setCosmosEndToEndOperationLatencyPolicyConfig(
            new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1)).build());
        cosmosContainer.deleteItem(documentId, new PartitionKey(partitionKey), requestOptions).block();
    } catch (Exception e) {
        CosmosException cosmosException = Utils.as(e, CosmosException.class);
        // Only "not found" is acceptable; anything else is a real failure.
        if (cosmosException == null || cosmosException.getStatusCode() != 404) {
            throw e;
        }
    }
}
// Deletes a document addressed with PartitionKey.NONE.
// NOTE(review): this only works for documents created without a partition-key
// value — confirm callers never use it for partitioned documents.
public static void deleteDocument(CosmosAsyncContainer cosmosContainer, String documentId) {
    CosmosItemRequestOptions options = new CosmosItemRequestOptions()
        .setCosmosEndToEndOperationLatencyPolicyConfig(
            new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofHours(1))
                .build()
        );
    cosmosContainer.deleteItem(documentId, PartitionKey.NONE, options).block();
}
/**
 * Deletes the user only when it currently exists in the database.
 */
public static void deleteUserIfExists(CosmosAsyncClient client, String databaseId, String userId) {
    CosmosAsyncDatabase database = client.getDatabase(databaseId);
    // Read first so a missing database fails here rather than in the query.
    database.read().block();
    List<CosmosUserProperties> matches = database
        .queryUsers(String.format("SELECT * FROM root r where r.id = '%s'", userId), null)
        .collectList()
        .block();
    if (!matches.isEmpty()) {
        deleteUser(database, userId);
    }
}
// Deletes the given user (fails if it does not exist).
public static void deleteUser(CosmosAsyncDatabase database, String userId) {
    database.getUser(userId).delete().block();
}
// Deletes any pre-existing database with the same id, then creates it fresh.
static private CosmosAsyncDatabase safeCreateDatabase(CosmosAsyncClient client, CosmosDatabaseProperties databaseSettings) {
    safeDeleteDatabase(client.getDatabase(databaseSettings.getId()));
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
// Creates a database and returns a handle to it (fails if it already exists).
static protected CosmosAsyncDatabase createDatabase(CosmosAsyncClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/**
 * Creates a database with the sync client; best-effort — returns null on failure.
 *
 * @return the database handle, or null when creation failed
 */
static protected CosmosDatabase createSyncDatabase(CosmosClient client, String databaseId) {
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    try {
        client.createDatabase(databaseSettings);
        return client.getDatabase(databaseSettings.getId());
    } catch (CosmosException e) {
        // Log through the test logger instead of printStackTrace so the failure is
        // captured in the test run's output; creation stays best-effort (null result).
        logger.error("failed to create sync database [{}]", databaseId, e);
    }
    return null;
}
/**
 * Returns the database, creating it first when it does not already exist.
 */
static protected CosmosAsyncDatabase createDatabaseIfNotExists(CosmosAsyncClient client, String databaseId) {
    List<CosmosDatabaseProperties> existing =
        client.queryDatabases(String.format("SELECT * FROM r where r.id = '%s'", databaseId), null)
            .collectList()
            .block();
    if (!existing.isEmpty()) {
        // Already present: read it to verify accessibility and return the handle.
        CosmosAsyncDatabase database = client.getDatabase(databaseId);
        database.read().block();
        return database;
    }
    CosmosDatabaseProperties databaseSettings = new CosmosDatabaseProperties(databaseId);
    client.createDatabase(databaseSettings).block();
    return client.getDatabase(databaseSettings.getId());
}
/**
 * Best-effort database delete: failures (e.g. the database is already gone) are
 * suppressed so teardown can continue. No-op for a null database.
 */
static protected void safeDeleteDatabase(CosmosAsyncDatabase database) {
    if (database != null) {
        try {
            database.delete().block();
        } catch (Exception e) {
            // Previously swallowed silently; log at debug so genuine teardown
            // problems remain diagnosable without making cleanup noisy.
            logger.debug("failed to delete database [{}]", database.getId(), e);
        }
    }
}
// Best-effort sync database delete: logs and suppresses any failure; no-op for null.
static protected void safeDeleteSyncDatabase(CosmosDatabase database) {
    if (database != null) {
        try {
            logger.info("attempting to delete database ....");
            database.delete();
            logger.info("database deletion completed");
        } catch (Exception e) {
            logger.error("failed to delete sync database", e);
        }
    }
}
/**
 * Deletes every container in the database; no-op for a null database.
 */
static protected void safeDeleteAllCollections(CosmosAsyncDatabase database) {
    if (database == null) {
        return;
    }
    List<CosmosContainerProperties> collections = database.readAllContainers()
        .collectList()
        .block();
    for (CosmosContainerProperties collection : collections) {
        database.getContainer(collection.getId()).delete().block();
    }
}
/**
 * Best-effort container delete: a 404 (already gone) is logged as informational,
 * any other failure is logged as an error; never throws from the delete itself.
 */
static protected void safeDeleteCollection(CosmosAsyncContainer collection) {
    if (collection == null) {
        return;
    }
    try {
        logger.info("attempting to delete container {}.{}....",
            collection.getDatabase().getId(),
            collection.getId());
        collection.delete().block();
        logger.info("Container {}.{} deletion completed",
            collection.getDatabase().getId(),
            collection.getId());
    } catch (Exception e) {
        boolean shouldLogAsError = true;
        if (e instanceof CosmosException) {
            CosmosException cosmosException = (CosmosException) e;
            // 404 means the container was already deleted — not an error.
            if (cosmosException.getStatusCode() == 404) {
                shouldLogAsError = false;
                logger.info(
                    "Container {}.{} does not exist anymore.",
                    collection.getDatabase().getId(),
                    collection.getId());
            }
        }
        if (shouldLogAsError) {
            logger.error("failed to delete sync container {}.{}",
                collection.getDatabase().getId(),
                collection.getId(),
                e);
        }
    } finally {
        // Brief pause after the delete — presumably to let the backend settle;
        // TODO confirm why this is needed.
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // Preserve the interrupt status before propagating (previously lost).
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}
// Best-effort variant that looks the container up by id first; any failure is
// swallowed so cleanup never fails the test.
static protected void safeDeleteCollection(CosmosAsyncDatabase database, String collectionId) {
    if (database != null && collectionId != null) {
        try {
            safeDeleteCollection(database.getContainer(collectionId));
        } catch (Exception e) {
            // Intentionally ignored: best-effort cleanup.
        }
    }
}
// Closes the client on a dedicated thread so a slow/hanging close cannot block
// the teardown thread; close failures are logged, never thrown.
static protected void safeCloseAsync(CosmosAsyncClient client) {
    if (client != null) {
        new Thread(() -> {
            try {
                client.close();
            } catch (Exception e) {
                logger.error("failed to close client", e);
            }
        }).start();
    }
}
// Closes the async client, logging (not throwing) any failure; no-op for null.
static protected void safeClose(CosmosAsyncClient client) {
    if (client != null) {
        try {
            client.close();
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    }
}
// Closes the sync client with progress logging; failures are logged, not thrown.
static protected void safeCloseSyncClient(CosmosClient client) {
    if (client != null) {
        try {
            logger.info("closing client ...");
            client.close();
            logger.info("closing client completed");
        } catch (Exception e) {
            logger.error("failed to close client", e);
        }
    }
}
// Validates a successful response using the default validation timeout.
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator) {
    validateSuccess(single, validator, subscriberValidationTimeout);
}
// Converts the Mono to a Flux and delegates to the Flux-based validator.
@SuppressWarnings("rawtypes")
public <T extends CosmosResponse> void validateSuccess(Mono<T> single, CosmosResponseValidator<T> validator, long timeout) {
    validateSuccess(single.flux(), validator, timeout);
}
/**
 * Subscribes to the flowable, waits up to {@code timeout} ms for termination and
 * validates that exactly one successful value was emitted.
 */
@SuppressWarnings("rawtypes")
public static <T extends CosmosResponse> void validateSuccess(Flux<T> flowable,
                                                              CosmosResponseValidator<T> validator, long timeout) {
    TestSubscriber<T> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
// Validates that the Mono terminates with an error matching the validator,
// using the default validation timeout. (The type parameter T is unused here;
// kept for source compatibility with existing callers.)
@SuppressWarnings("rawtypes")
public <T, U extends CosmosResponse> void validateFailure(Mono<U> mono, FailureValidator validator)
    throws InterruptedException {
    validateFailure(mono.flux(), validator, subscriberValidationTimeout);
}
// Subscribes, waits for termination and asserts exactly one error was emitted.
// getEvents().get(1) holds the error notifications per the RxJava TestSubscriber
// contract (index 0 = values, 1 = errors, 2 = completions).
@SuppressWarnings("rawtypes")
public static <T extends Resource, U extends CosmosResponse> void validateFailure(Flux<U> flowable,
                                                                                  FailureValidator validator, long timeout) throws InterruptedException {
    TestSubscriber<CosmosResponse> testSubscriber = new TestSubscriber<>();
    flowable.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.errors()).hasSize(1);
    validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
/**
 * Subscribes to an item-operation Mono, waits for completion (default timeout)
 * and validates the single successful response.
 */
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemSuccess(
    Mono<T> responseMono, CosmosItemResponseValidator validator) {
    TestSubscriber<CosmosItemResponse> subscriber = new TestSubscriber<>();
    responseMono.subscribe(subscriber);
    subscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    subscriber.assertValueCount(1);
    validator.validate(subscriber.values().get(0));
}
// Validates that an item operation terminates with exactly one error matching the
// validator (failure-side counterpart of validateItemSuccess). getEvents().get(1)
// holds the error notifications per the RxJava TestSubscriber contract.
@SuppressWarnings("rawtypes")
public <T extends CosmosItemResponse> void validateItemFailure(
    Mono<T> responseMono, FailureValidator validator) {
    TestSubscriber<CosmosItemResponse> testSubscriber = new TestSubscriber<>();
    responseMono.subscribe(testSubscriber);
    testSubscriber.awaitTerminalEvent(subscriberValidationTimeout, TimeUnit.MILLISECONDS);
    testSubscriber.assertNotComplete();
    testSubscriber.assertTerminated();
    assertThat(testSubscriber.errors()).hasSize(1);
    validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
// Validates a successful paged query using the default validation timeout.
public <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                     FeedResponseListValidator<T> validator) {
    validateQuerySuccess(flowable, validator, subscriberValidationTimeout);
}
/**
 * Subscribes to the paged query flux, waits for completion and validates every
 * emitted page.
 */
public static <T> void validateQuerySuccess(Flux<FeedResponse<T>> flowable,
                                            FeedResponseListValidator<T> validator, long timeout) {
    TestSubscriber<FeedResponse<T>> subscriber = new TestSubscriber<>();
    flowable.subscribe(subscriber);
    subscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
    subscriber.assertNoErrors();
    subscriber.assertComplete();
    validator.validate(subscriber.values());
}
/**
 * Runs the query once per page size, driving it via continuation tokens, and
 * validates the pages collected for each page size.
 */
public static <T> void validateQuerySuccessWithContinuationTokenAndSizes(
    String query,
    CosmosAsyncContainer container,
    int[] pageSizes,
    FeedResponseListValidator<T> validator,
    Class<T> classType) {
    for (int pageSize : pageSizes) {
        validator.validate(queryWithContinuationTokens(query, container, pageSize, classType));
    }
}
/**
 * Drains a query page by page using continuation tokens: each iteration issues a
 * fresh CosmosPagedFlux resumed from the previous page's token, until the token
 * is null. Returns the pages in order.
 *
 * @param query SQL query text
 * @param container container to query
 * @param pageSize maximum items per page
 * @param classType item deserialization type
 * @return all pages in retrieval order
 */
public static <T> List<FeedResponse<T>> queryWithContinuationTokens(
    String query,
    CosmosAsyncContainer container,
    int pageSize,
    Class<T> classType) {
    String requestContinuation = null;
    List<FeedResponse<T>> responseList = new ArrayList<>();
    // (The previous implementation also accumulated every continuation token in a
    // local list that was never read; that dead code has been removed.)
    do {
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        CosmosPagedFlux<T> queryObservable = container.queryItems(query, options, classType);
        TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
        // Request exactly one page, resuming from the previous continuation token.
        queryObservable.byPage(requestContinuation, pageSize).subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        testSubscriber.assertNoErrors();
        testSubscriber.assertComplete();
        @SuppressWarnings("unchecked")
        FeedResponse<T> firstPage = (FeedResponse<T>) testSubscriber.getEvents().get(0).get(0);
        requestContinuation = firstPage.getContinuationToken();
        responseList.add(firstPage);
    } while (requestContinuation != null);
    return responseList;
}
public <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable, FailureValidator validator) {
validateQueryFailure(flowable, validator, subscriberValidationTimeout);
}
public static <T> void validateQueryFailure(Flux<FeedResponse<T>> flowable,
FailureValidator validator, long timeout) {
TestSubscriber<FeedResponse<T>> testSubscriber = new TestSubscriber<>();
flowable.subscribe(testSubscriber);
testSubscriber.awaitTerminalEvent(timeout, TimeUnit.MILLISECONDS);
testSubscriber.assertNotComplete();
testSubscriber.assertTerminated();
assertThat(testSubscriber.getEvents().get(1)).hasSize(1);
validator.validate((Throwable) testSubscriber.getEvents().get(1).get(0));
}
@DataProvider
public static Object[][] clientBuilders() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
@DataProvider
public static Object[][] clientBuildersWithGateway() {
return new Object[][]{{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}};
}
@DataProvider
public static Object[][] clientBuildersWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)},
{createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true)}
};
}
@DataProvider
public static Object[][] clientBuilderSolelyDirectWithSessionConsistency() {
return new Object[][]{
{createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, true)}
};
}
static ConsistencyLevel parseConsistency(String consistency) {
if (consistency != null) {
consistency = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency).trim();
return ConsistencyLevel.valueOf(consistency);
}
logger.error("INVALID configured test consistency [{}].", consistency);
throw new IllegalStateException("INVALID configured test consistency " + consistency);
}
static List<String> parsePreferredLocation(String preferredLocations) {
if (StringUtils.isEmpty(preferredLocations)) {
return null;
}
try {
return objectMapper.readValue(preferredLocations, new TypeReference<List<String>>() {
});
} catch (Exception e) {
logger.error("INVALID configured test preferredLocations [{}].", preferredLocations);
throw new IllegalStateException("INVALID configured test preferredLocations " + preferredLocations);
}
}
static List<Protocol> parseProtocols(String protocols) {
if (StringUtils.isEmpty(protocols)) {
return null;
}
List<Protocol> protocolList = new ArrayList<>();
try {
List<String> protocolStrings = objectMapper.readValue(protocols, new TypeReference<List<String>>() {
});
for(String protocol : protocolStrings) {
protocolList.add(Protocol.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, protocol)));
}
return protocolList;
} catch (Exception e) {
logger.error("INVALID configured test protocols [{}].", protocols);
throw new IllegalStateException("INVALID configured test protocols " + protocols);
}
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirect() {
return simpleClientBuildersWithDirect(true, true, true, toArray(protocols));
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirectHttps() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.HTTPS);
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcp() {
return simpleClientBuildersWithDirect(true, true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuildersWithJustDirectTcp() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return simpleClientBuildersWithDirect(false, true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuildersWithoutRetryOnThrottledRequests() {
return new Object[][]{
{ createDirectRxDocumentClient(ConsistencyLevel.SESSION, Protocol.TCP, false, null, true, false) },
{ createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, false) }
};
}
private static Object[][] simpleClientBuildersWithDirect(
boolean contentResponseOnWriteEnabled,
Protocol... protocols) {
return simpleClientBuildersWithDirect(true, contentResponseOnWriteEnabled, true, protocols);
}
private static Object[][] simpleClientBuildersWithDirect(
boolean includeGateway,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
List<ConsistencyLevel> testConsistencies = ImmutableList.of(ConsistencyLevel.EVENTUAL);
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(
consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
if (includeGateway) {
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
false,
null,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
}
return cosmosConfigurations.stream().map(b -> new Object[]{b}).collect(Collectors.toList()).toArray(new Object[0][]);
}
@DataProvider
public static Object[][] clientBuildersWithDirect() {
return clientBuildersWithDirectAllConsistencies(true, true, toArray(protocols));
}
@DataProvider
public static Object[][] clientBuildersWithDirectHttps() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.HTTPS);
}
@DataProvider
public static Object[][] clientBuildersWithDirectTcp() {
return clientBuildersWithDirectAllConsistencies(true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] clientBuildersWithDirectTcpWithContentResponseOnWriteDisabled() {
return clientBuildersWithDirectAllConsistencies(false, true, Protocol.TCP);
}
@DataProvider
public static Object[][] clientBuildersWithContentResponseOnWriteEnabledAndDisabled() {
Object[][] clientBuildersWithDisabledContentResponseOnWrite =
clientBuildersWithDirectSession(false, true, Protocol.TCP);
Object[][] clientBuildersWithEnabledContentResponseOnWrite =
clientBuildersWithDirectSession(true, true, Protocol.TCP);
int length = clientBuildersWithDisabledContentResponseOnWrite.length
+ clientBuildersWithEnabledContentResponseOnWrite.length;
Object[][] clientBuilders = new Object[length][];
int index = 0;
for (int i = 0; i < clientBuildersWithDisabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithDisabledContentResponseOnWrite[i];
}
for (int i = 0; i < clientBuildersWithEnabledContentResponseOnWrite.length; i++, index++) {
clientBuilders[index] = clientBuildersWithEnabledContentResponseOnWrite[i];
}
return clientBuilders;
}
@DataProvider
public static Object[][] clientBuildersWithDirectSession() {
return clientBuildersWithDirectSession(true, true, toArray(protocols));
}
@DataProvider
public static Object[][] clientBuildersWithDirectSessionIncludeComputeGateway() {
Object[][] originalProviders = clientBuildersWithDirectSession(
true,
true,
toArray(protocols));
List<Object[]> providers = new ArrayList<>(Arrays.asList(originalProviders));
Object[] injectedProviderParameters = new Object[1];
CosmosClientBuilder builder = createGatewayRxDocumentClient(
TestConfigurations.HOST.replace(ROUTING_GATEWAY_EMULATOR_PORT, COMPUTE_GATEWAY_EMULATOR_PORT),
ConsistencyLevel.SESSION,
false,
null,
true,
true);
injectedProviderParameters[0] = builder;
providers.add(injectedProviderParameters);
Object[][] array = new Object[providers.size()][];
return providers.toArray(array);
}
@DataProvider
public static Object[][] clientBuildersWithDirectTcpSession() {
return clientBuildersWithDirectSession(true, true, Protocol.TCP);
}
@DataProvider
public static Object[][] simpleClientBuilderGatewaySession() {
return clientBuildersWithDirectSession(true, true);
}
static Protocol[] toArray(List<Protocol> protocols) {
return protocols.toArray(new Protocol[protocols.size()]);
}
private static Object[][] clientBuildersWithDirectSession(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
return clientBuildersWithDirect(new ArrayList<ConsistencyLevel>() {{
add(ConsistencyLevel.SESSION);
}}, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
private static Object[][] clientBuildersWithDirectAllConsistencies(boolean contentResponseOnWriteEnabled, boolean retryOnThrottledRequests, Protocol... protocols) {
logger.info("Max test consistency to use is [{}]", accountConsistency);
return clientBuildersWithDirect(desiredConsistencies, contentResponseOnWriteEnabled, retryOnThrottledRequests, protocols);
}
static List<ConsistencyLevel> parseDesiredConsistencies(String consistencies) {
if (StringUtils.isEmpty(consistencies)) {
return null;
}
List<ConsistencyLevel> consistencyLevels = new ArrayList<>();
try {
List<String> consistencyStrings = objectMapper.readValue(consistencies, new TypeReference<List<String>>() {});
for(String consistency : consistencyStrings) {
consistencyLevels.add(ConsistencyLevel.valueOf(CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, consistency)));
}
return consistencyLevels;
} catch (Exception e) {
logger.error("INVALID consistency test desiredConsistencies [{}].", consistencies);
throw new IllegalStateException("INVALID configured test desiredConsistencies " + consistencies);
}
}
@SuppressWarnings("fallthrough")
static List<ConsistencyLevel> allEqualOrLowerConsistencies(ConsistencyLevel accountConsistency) {
List<ConsistencyLevel> testConsistencies = new ArrayList<>();
switch (accountConsistency) {
case STRONG:
testConsistencies.add(ConsistencyLevel.STRONG);
case BOUNDED_STALENESS:
testConsistencies.add(ConsistencyLevel.BOUNDED_STALENESS);
case SESSION:
testConsistencies.add(ConsistencyLevel.SESSION);
case CONSISTENT_PREFIX:
testConsistencies.add(ConsistencyLevel.CONSISTENT_PREFIX);
case EVENTUAL:
testConsistencies.add(ConsistencyLevel.EVENTUAL);
break;
default:
throw new IllegalStateException("INVALID configured test consistency " + accountConsistency);
}
return testConsistencies;
}
private static Object[][] clientBuildersWithDirect(
List<ConsistencyLevel> testConsistencies,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests,
Protocol... protocols) {
boolean isMultiMasterEnabled = preferredLocations != null && accountConsistency == ConsistencyLevel.SESSION;
List<CosmosClientBuilder> cosmosConfigurations = new ArrayList<>();
for (Protocol protocol : protocols) {
testConsistencies.forEach(consistencyLevel -> cosmosConfigurations.add(createDirectRxDocumentClient(consistencyLevel,
protocol,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests)));
}
cosmosConfigurations.forEach(c -> {
ConnectionPolicy connectionPolicy = CosmosBridgeInternal.getConnectionPolicy(c);
ConsistencyLevel consistencyLevel = CosmosBridgeInternal.getConsistencyLevel(c);
logger.info("Will Use ConnectionMode [{}], Consistency [{}], Protocol [{}]",
connectionPolicy.getConnectionMode(),
consistencyLevel,
extractConfigs(c).getProtocol()
);
});
cosmosConfigurations.add(
createGatewayRxDocumentClient(
ConsistencyLevel.SESSION,
isMultiMasterEnabled,
preferredLocations,
contentResponseOnWriteEnabled,
retryOnThrottledRequests));
return cosmosConfigurations.stream().map(c -> new Object[]{c}).collect(Collectors.toList()).toArray(new Object[0][]);
}
static protected CosmosClientBuilder createGatewayHouseKeepingDocumentClient(boolean contentResponseOnWriteEnabled) {
ThrottlingRetryOptions options = new ThrottlingRetryOptions();
options.setMaxRetryWaitTime(Duration.ofSeconds(SUITE_SETUP_TIMEOUT));
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
return new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.throttlingRetryOptions(options)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(ConsistencyLevel.SESSION);
}
static protected CosmosClientBuilder createGatewayRxDocumentClient(
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
return createGatewayRxDocumentClient(
TestConfigurations.HOST,
consistencyLevel,
multiMasterEnabled,
preferredRegions,
contentResponseOnWriteEnabled,
retryOnThrottledRequests);
}
static protected CosmosClientBuilder createGatewayRxDocumentClient(
String endpoint,
ConsistencyLevel consistencyLevel,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig();
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(endpoint)
.credential(credential)
.gatewayMode(gatewayConnectionConfig)
.multipleWriteRegionsEnabled(multiMasterEnabled)
.preferredRegions(preferredRegions)
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
ImplementationBridgeHelpers
.CosmosClientBuilderHelper
.getCosmosClientBuilderAccessor()
.buildConnectionPolicy(builder);
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
return builder;
}
static protected CosmosClientBuilder createGatewayRxDocumentClient() {
return createGatewayRxDocumentClient(ConsistencyLevel.SESSION, false, null, true, true);
}
static protected CosmosClientBuilder createDirectRxDocumentClient(ConsistencyLevel consistencyLevel,
Protocol protocol,
boolean multiMasterEnabled,
List<String> preferredRegions,
boolean contentResponseOnWriteEnabled,
boolean retryOnThrottledRequests) {
CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(TestConfigurations.HOST)
.credential(credential)
.directMode(DirectConnectionConfig.getDefaultConfig())
.contentResponseOnWriteEnabled(contentResponseOnWriteEnabled)
.consistencyLevel(consistencyLevel);
if (preferredRegions != null) {
builder.preferredRegions(preferredRegions);
}
if (multiMasterEnabled && consistencyLevel == ConsistencyLevel.SESSION) {
builder.multipleWriteRegionsEnabled(true);
}
if (!retryOnThrottledRequests) {
builder.throttlingRetryOptions(new ThrottlingRetryOptions().setMaxRetryAttemptsOnThrottledRequests(0));
}
Configs configs = spy(new Configs());
doAnswer((Answer<Protocol>)invocation -> protocol).when(configs).getProtocol();
return injectConfigs(builder, configs);
}
protected int expectedNumberOfPages(int totalExpectedResult, int maxPageSize) {
return Math.max((totalExpectedResult + maxPageSize - 1 ) / maxPageSize, 1);
}
@DataProvider(name = "queryMetricsArgProvider")
public Object[][] queryMetricsArgProvider() {
return new Object[][]{
{true},
{false},
{null}
};
}
@DataProvider(name = "queryWithOrderByProvider")
public Object[][] queryWithOrderBy() {
return new Object[][]{
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c.id DESC", true },
{ "SELECT DISTINCT VALUE c.id from c ORDER BY c._ts DESC", false }
};
}
public static CosmosClientBuilder copyCosmosClientBuilder(CosmosClientBuilder builder) {
return CosmosBridgeInternal.cloneCosmosClientBuilder(builder);
}
public byte[] decodeHexString(String string) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
for (int i = 0; i < string.length(); i+=2) {
int b = Integer.parseInt(string.substring(i, i + 2), 16);
outputStream.write(b);
}
return outputStream.toByteArray();
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} | class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager {
public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) {
return new DatabaseManagerImpl(client);
}
private final CosmosAsyncClient client;
private DatabaseManagerImpl(CosmosAsyncClient client) {
this.client = client;
}
@Override
public CosmosPagedFlux<CosmosDatabaseProperties> queryDatabases(SqlQuerySpec query) {
return client.queryDatabases(query, null);
}
@Override
public Mono<CosmosDatabaseResponse> createDatabase(CosmosDatabaseProperties databaseDefinition) {
return client.createDatabase(databaseDefinition);
}
@Override
public CosmosAsyncDatabase getDatabase(String id) {
return client.getDatabase(id);
}
} |
Can we factor this into a method so we don't repeat all the logic? Could just have an extra logger in the cache case to emit an extra line that says "using cache" or the like. | public Mono<AccessToken> getToken(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
Mono<AccessToken> accessTokenMono;
if (cachedWorkingCredential.get() != null) {
accessTokenMono = Mono.defer(() -> cachedWorkingCredential.get().getToken(request)
.doOnNext(t -> LOGGER.info("Azure Identity => Returning token from cached credential {}",
cachedWorkingCredential.get().getClass().getSimpleName()))
.onErrorResume(Exception.class, t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
unavailableError + cachedWorkingCredential.get().getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage(),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
LOGGER.info("Azure Identity => Cached credential {} is unavailable.",
cachedWorkingCredential.get().getClass().getSimpleName());
return Mono.empty();
}));
} else {
accessTokenMono = Flux.fromIterable(credentials)
.flatMap(p -> p.getToken(request)
.doOnNext(t -> {
LOGGER.info("Azure Identity => Attempted credential {} returns a token",
p.getClass().getSimpleName());
cachedWorkingCredential.set(p);
}).onErrorResume(Exception.class, t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
unavailableError + p.getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage(),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
LOGGER.info("Azure Identity => Attempted credential {} is unavailable.",
p.getClass().getSimpleName());
return Mono.empty();
}), 1)
.next();
}
return accessTokenMono.switchIfEmpty(Mono.defer(() -> {
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
: ""));
}
return Mono.error(last);
}));
} | .next(); | public Mono<AccessToken> getToken(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
Mono<AccessToken> accessTokenMono;
if (selectedCredential.get() != null) {
accessTokenMono = Mono.defer(() -> selectedCredential.get().getToken(request)
.doOnNext(t -> logTokenMessage("Azure Identity => Returning token from cached credential {}",
selectedCredential.get()))
.onErrorResume(Exception.class, handleExceptionAsync(exceptions,
selectedCredential.get(), "Azure Identity => Cached credential {} is unavailable.")));
} else {
accessTokenMono = Flux.fromIterable(credentials)
.flatMap(p -> p.getToken(request)
.doOnNext(t -> {
logTokenMessage("Azure Identity => Attempted credential {} returns a token", p);
selectedCredential.set(p);
}).onErrorResume(Exception.class, handleExceptionAsync(exceptions, p,
"Azure Identity => Attempted credential {} is unavailable.")), 1)
.next();
}
return accessTokenMono.switchIfEmpty(Mono.defer(() -> {
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
}
return Mono.error(last);
}));
} | class ChainedTokenCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class);
private final List<TokenCredential> credentials;
private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> ";
private AtomicReference<TokenCredential> cachedWorkingCredential;
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(List<TokenCredential> credentials) {
this.credentials = Collections.unmodifiableList(credentials);
cachedWorkingCredential = new AtomicReference<>();
}
/**
* Sequentially calls {@link TokenCredential
* returning the first successfully obtained {@link AccessToken}.
*
* This method is called automatically by Azure SDK client libraries.
* You may call this method directly, but you must also handle token
* caching and token refreshing.
*
* @param request the details of the token request
* @return a Publisher that emits a single access token
*/
@Override
@Override
public AccessToken getTokenSync(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
for (TokenCredential credential : credentials) {
try {
return credential.getTokenSync(request);
} catch (Exception e) {
if (e.getClass() != CredentialUnavailableException.class) {
throw new ClientAuthenticationException(
unavailableError + credential.getClass().getSimpleName()
+ " authentication failed. Error Details: " + e.getMessage(),
null, e);
} else {
if (e instanceof CredentialUnavailableException) {
exceptions.add((CredentialUnavailableException) e);
}
}
LOGGER.info("Azure Identity => Attempted credential {} is unavailable.",
credential.getClass().getSimpleName());
}
}
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
: ""));
}
throw last;
}
WorkloadIdentityCredential getWorkloadIdentityCredentialIfPresent() {
List<TokenCredential> tokenCredentials = this.credentials
.stream().filter(tokenCredential -> tokenCredential instanceof WorkloadIdentityCredential)
.collect(Collectors.toList());
if (tokenCredentials.size() == 1) {
return (WorkloadIdentityCredential) tokenCredentials.get(0);
} else {
return null;
}
}
} | class ChainedTokenCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class);
private final List<TokenCredential> credentials;
private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> ";
private final AtomicReference<TokenCredential> selectedCredential;
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(List<TokenCredential> credentials) {
this.credentials = Collections.unmodifiableList(credentials);
selectedCredential = new AtomicReference<>();
}
/**
* Sequentially calls {@link TokenCredential
* returning the first successfully obtained {@link AccessToken}.
*
* This method is called automatically by Azure SDK client libraries.
* You may call this method directly, but you must also handle token
* caching and token refreshing.
*
* @param request the details of the token request
* @return a Publisher that emits a single access token
*/
@Override
private Function<Exception, Mono<? extends AccessToken>> handleExceptionAsync(List<CredentialUnavailableException> exceptions,
TokenCredential p, String logMessage) {
return t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
getCredUnavailableMessage(p, t),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
logTokenMessage(logMessage, p);
return Mono.empty();
};
}
@Override
public AccessToken getTokenSync(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
if (selectedCredential.get() != null) {
try {
AccessToken accessToken = selectedCredential.get().getTokenSync(request);
logTokenMessage("Azure Identity => Returning token from cached credential {}", selectedCredential.get());
return accessToken;
} catch (Exception e) {
handleExceptionSync(e, selectedCredential.get(), exceptions,
"Azure Identity => Cached credential {} is unavailable.", selectedCredential.get());
}
} else {
for (TokenCredential credential : credentials) {
try {
AccessToken accessToken = credential.getTokenSync(request);
logTokenMessage("Azure Identity => Attempted credential {} returns a token", credential);
selectedCredential.set(credential);
return accessToken;
} catch (Exception e) {
handleExceptionSync(e, credential, exceptions,
"Azure Identity => Attempted credential {} is unavailable.", credential);
}
}
}
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
: ""));
}
throw last;
}
private void logTokenMessage(String format, TokenCredential selectedCredential) {
LOGGER.info(format,
selectedCredential.getClass().getSimpleName());
}
private String getCredUnavailableMessage(TokenCredential p, Exception t) {
return unavailableError + p.getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage();
}
private void handleExceptionSync(Exception e, TokenCredential selectedCredential,
List<CredentialUnavailableException> exceptions, String logMessage,
TokenCredential selectedCredential1) {
if (e.getClass() != CredentialUnavailableException.class) {
throw new ClientAuthenticationException(
getCredUnavailableMessage(selectedCredential, e),
null, e);
} else {
if (e instanceof CredentialUnavailableException) {
exceptions.add((CredentialUnavailableException) e);
}
}
logTokenMessage(logMessage, selectedCredential1);
}
WorkloadIdentityCredential getWorkloadIdentityCredentialIfPresent() {
List<TokenCredential> tokenCredentials = this.credentials
.stream().filter(tokenCredential -> tokenCredential instanceof WorkloadIdentityCredential)
.collect(Collectors.toList());
if (tokenCredentials.size() == 1) {
return (WorkloadIdentityCredential) tokenCredentials.get(0);
} else {
return null;
}
}
} |
Yeah, agreed. Already have a change for it. | public Mono<AccessToken> getToken(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
Mono<AccessToken> accessTokenMono;
if (cachedWorkingCredential.get() != null) {
accessTokenMono = Mono.defer(() -> cachedWorkingCredential.get().getToken(request)
.doOnNext(t -> LOGGER.info("Azure Identity => Returning token from cached credential {}",
cachedWorkingCredential.get().getClass().getSimpleName()))
.onErrorResume(Exception.class, t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
unavailableError + cachedWorkingCredential.get().getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage(),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
LOGGER.info("Azure Identity => Cached credential {} is unavailable.",
cachedWorkingCredential.get().getClass().getSimpleName());
return Mono.empty();
}));
} else {
accessTokenMono = Flux.fromIterable(credentials)
.flatMap(p -> p.getToken(request)
.doOnNext(t -> {
LOGGER.info("Azure Identity => Attempted credential {} returns a token",
p.getClass().getSimpleName());
cachedWorkingCredential.set(p);
}).onErrorResume(Exception.class, t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
unavailableError + p.getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage(),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
LOGGER.info("Azure Identity => Attempted credential {} is unavailable.",
p.getClass().getSimpleName());
return Mono.empty();
}), 1)
.next();
}
return accessTokenMono.switchIfEmpty(Mono.defer(() -> {
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
: ""));
}
return Mono.error(last);
}));
} | .next(); | public Mono<AccessToken> getToken(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
Mono<AccessToken> accessTokenMono;
if (selectedCredential.get() != null) {
accessTokenMono = Mono.defer(() -> selectedCredential.get().getToken(request)
.doOnNext(t -> logTokenMessage("Azure Identity => Returning token from cached credential {}",
selectedCredential.get()))
.onErrorResume(Exception.class, handleExceptionAsync(exceptions,
selectedCredential.get(), "Azure Identity => Cached credential {} is unavailable.")));
} else {
accessTokenMono = Flux.fromIterable(credentials)
.flatMap(p -> p.getToken(request)
.doOnNext(t -> {
logTokenMessage("Azure Identity => Attempted credential {} returns a token", p);
selectedCredential.set(p);
}).onErrorResume(Exception.class, handleExceptionAsync(exceptions, p,
"Azure Identity => Attempted credential {} is unavailable.")), 1)
.next();
}
return accessTokenMono.switchIfEmpty(Mono.defer(() -> {
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
}
return Mono.error(last);
}));
} | class ChainedTokenCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class);
private final List<TokenCredential> credentials;
private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> ";
private AtomicReference<TokenCredential> cachedWorkingCredential;
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(List<TokenCredential> credentials) {
this.credentials = Collections.unmodifiableList(credentials);
cachedWorkingCredential = new AtomicReference<>();
}
/**
* Sequentially calls {@link TokenCredential
* returning the first successfully obtained {@link AccessToken}.
*
* This method is called automatically by Azure SDK client libraries.
* You may call this method directly, but you must also handle token
* caching and token refreshing.
*
* @param request the details of the token request
* @return a Publisher that emits a single access token
*/
@Override
@Override
public AccessToken getTokenSync(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
for (TokenCredential credential : credentials) {
try {
return credential.getTokenSync(request);
} catch (Exception e) {
if (e.getClass() != CredentialUnavailableException.class) {
throw new ClientAuthenticationException(
unavailableError + credential.getClass().getSimpleName()
+ " authentication failed. Error Details: " + e.getMessage(),
null, e);
} else {
if (e instanceof CredentialUnavailableException) {
exceptions.add((CredentialUnavailableException) e);
}
}
LOGGER.info("Azure Identity => Attempted credential {} is unavailable.",
credential.getClass().getSimpleName());
}
}
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
: ""));
}
throw last;
}
WorkloadIdentityCredential getWorkloadIdentityCredentialIfPresent() {
List<TokenCredential> tokenCredentials = this.credentials
.stream().filter(tokenCredential -> tokenCredential instanceof WorkloadIdentityCredential)
.collect(Collectors.toList());
if (tokenCredentials.size() == 1) {
return (WorkloadIdentityCredential) tokenCredentials.get(0);
} else {
return null;
}
}
} | class ChainedTokenCredential implements TokenCredential {
private static final ClientLogger LOGGER = new ClientLogger(ChainedTokenCredential.class);
private final List<TokenCredential> credentials;
private final String unavailableError = this.getClass().getSimpleName() + " authentication failed. ---> ";
private final AtomicReference<TokenCredential> selectedCredential;
/**
* Create an instance of chained token credential that aggregates a list of token
* credentials.
*/
ChainedTokenCredential(List<TokenCredential> credentials) {
this.credentials = Collections.unmodifiableList(credentials);
selectedCredential = new AtomicReference<>();
}
/**
* Sequentially calls {@link TokenCredential
* returning the first successfully obtained {@link AccessToken}.
*
* This method is called automatically by Azure SDK client libraries.
* You may call this method directly, but you must also handle token
* caching and token refreshing.
*
* @param request the details of the token request
* @return a Publisher that emits a single access token
*/
@Override
private Function<Exception, Mono<? extends AccessToken>> handleExceptionAsync(List<CredentialUnavailableException> exceptions,
TokenCredential p, String logMessage) {
return t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
getCredUnavailableMessage(p, t),
null, t));
}
exceptions.add((CredentialUnavailableException) t);
logTokenMessage(logMessage, p);
return Mono.empty();
};
}
@Override
public AccessToken getTokenSync(TokenRequestContext request) {
List<CredentialUnavailableException> exceptions = new ArrayList<>(4);
if (selectedCredential.get() != null) {
try {
AccessToken accessToken = selectedCredential.get().getTokenSync(request);
logTokenMessage("Azure Identity => Returning token from cached credential {}", selectedCredential.get());
return accessToken;
} catch (Exception e) {
handleExceptionSync(e, selectedCredential.get(), exceptions,
"Azure Identity => Cached credential {} is unavailable.", selectedCredential.get());
}
} else {
for (TokenCredential credential : credentials) {
try {
AccessToken accessToken = credential.getTokenSync(request);
logTokenMessage("Azure Identity => Attempted credential {} returns a token", credential);
selectedCredential.set(credential);
return accessToken;
} catch (Exception e) {
handleExceptionSync(e, credential, exceptions,
"Azure Identity => Attempted credential {} is unavailable.", credential);
}
}
}
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException(current.getMessage() + "\r\n" + last.getMessage()
+ (z == 0 ? "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
: ""));
}
throw last;
}
private void logTokenMessage(String format, TokenCredential selectedCredential) {
LOGGER.info(format,
selectedCredential.getClass().getSimpleName());
}
private String getCredUnavailableMessage(TokenCredential p, Exception t) {
return unavailableError + p.getClass().getSimpleName()
+ " authentication failed. Error Details: " + t.getMessage();
}
private void handleExceptionSync(Exception e, TokenCredential selectedCredential,
List<CredentialUnavailableException> exceptions, String logMessage,
TokenCredential selectedCredential1) {
if (e.getClass() != CredentialUnavailableException.class) {
throw new ClientAuthenticationException(
getCredUnavailableMessage(selectedCredential, e),
null, e);
} else {
if (e instanceof CredentialUnavailableException) {
exceptions.add((CredentialUnavailableException) e);
}
}
logTokenMessage(logMessage, selectedCredential1);
}
WorkloadIdentityCredential getWorkloadIdentityCredentialIfPresent() {
List<TokenCredential> tokenCredentials = this.credentials
.stream().filter(tokenCredential -> tokenCredential instanceof WorkloadIdentityCredential)
.collect(Collectors.toList());
if (tokenCredentials.size() == 1) {
return (WorkloadIdentityCredential) tokenCredentials.get(0);
} else {
return null;
}
}
} |
nit: reversed actual and expected. | public void testExpiresOnParsingAzureCli() {
Map<String, String> tokenDetails = new HashMap<>();
tokenDetails.put("expiresOn", "2023-10-31 21:59:10.000000");
OffsetDateTime offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1698814750);
tokenDetails.put("expires_on", "1572371520");
offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1572371520);
} | Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1572371520); | public void testExpiresOnParsingAzureCli() {
Map<String, String> tokenDetails = new HashMap<>();
String expiresOn = "2023-10-31 21:59:10.000000";
tokenDetails.put("expiresOn", expiresOn);
OffsetDateTime offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(),
IdentityClientBase.parseExpiresOnTime(expiresOn).toEpochSecond());
tokenDetails.put("expires_on", "1572371520");
offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1572371520);
} | class IdentityClientTests {
private static final String TENANT_ID = "contoso.com";
private static final String CLIENT_ID = UUID.randomUUID().toString();
@Test
public void testValidSecret() {
String secret = "secret";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret(secret).build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
@Test
public void testInvalidSecret() {
String secret = "secret";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
try {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret("bad secret").build();
client.authenticateWithConfidentialClient(request).block();
fail();
} catch (MsalServiceException e) {
Assertions.assertEquals("Invalid clientSecret", e.getMessage());
}
});
}
@Test
public void testValidCertificate() {
String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientCertificate(request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.certificatePath(pfxPath).certificatePassword("StrongPass!123").build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testPemCertificate() {
String pemPath;
URL pemUrl = getClass().getClassLoader().getResource("certificate.pem");
if (pemUrl.getPath().contains(":")) {
pemPath = pemUrl.getPath().substring(1);
} else {
pemPath = pemUrl.getPath();
}
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientPemCertificate(accessToken, request, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).certificatePath(pemPath).build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testInvalidCertificatePassword() {
String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientCertificate(request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.certificatePath(pfxPath).certificatePassword("BadPassword").build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.verifyErrorSatisfies(e -> assertTrue(e.getMessage().contains("password was incorrect")));
});
}
@Test
public void testValidDeviceCodeFlow() {
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForDeviceCodeFlow(request, accessToken, expiresOn, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.identityClientOptions(options)
.build();
StepVerifier.create(client.authenticateWithDeviceCode(request, deviceCodeChallenge -> { /* do nothing */ }))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testValidServiceFabricCodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
String thumbprint = "950a2c88d57b5e19ac5119315f9ec199ff3cb823";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint)
.put("IDENTITY_HEADER", secret)
.put("IDENTITY_SERVER_THUMBPRINT", thumbprint));
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.toEpochSecond() + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.SERVICE_FABRIC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint)
.setIdentityHeader(secret)
.setIdentityServerThumbprint(thumbprint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForServiceFabricCodeFlow(tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testValidIdentityEndpointMSICodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint)
.put("IDENTITY_HEADER", secret));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.APP_SERVICE)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint)
.setIdentityHeader(secret))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForMSICodeFlow(tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testInValidIdentityEndpointSecretArcCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.ARC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
Assertions.assertThrows(ClientAuthenticationException.class,
() -> mockForArcCodeFlow(401, () -> {
client.getTokenFromTargetManagedIdentity(request).block();
}));
}
@Test
public void testInValidIdentityEndpointResponseCodeArcCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.ARC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
Assertions.assertThrows(ClientAuthenticationException.class,
() -> mockForArcCodeFlow(200, () -> client.getTokenFromTargetManagedIdentity(request).block()));
}
@Test
public void testValidIMDSCodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("MSI_ENDPOINT", endpoint)
.put("MSI_SECRET", secret));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM)
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForIMDSCodeFlow(IdentityConstants.DEFAULT_IMDS_ENDPOINT, tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testCustomIMDSCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put(Configuration.PROPERTY_AZURE_POD_IDENTITY_TOKEN_URL, endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM)
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForIMDSCodeFlow(endpoint, tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testAuthorizationCodeFlow() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
String authCode1 = "authCode1";
URI redirectUri = new URI("http:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForAuthorizationCodeFlow(token1, request, expiresAt, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithAuthorizationCode(request, authCode1, redirectUri))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testUserRefreshTokenflow() {
String token1 = "token1";
String token2 = "token1";
TokenRequestContext request2 = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForUserRefreshTokenFlow(token2, request2, expiresAt, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithPublicClientCache(request2, TestUtils.getMockMsalAccount(token1, expiresAt).block()))
.expectNextMatches(accessToken -> token2.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testUsernamePasswordCodeFlow() {
String username = "testuser";
String password = "testpassword";
String token = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForUsernamePasswordCodeFlow(token, request, expiresOn, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithUsernamePassword(request, username, password))
.expectNextMatches(accessToken -> token.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testBrowserAuthenicationCodeFlow() {
String username = "testuser";
String password = "testpassword";
String token = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
mockForBrowserAuthenticationCodeFlow(token, request, expiresOn, () -> {
StepVerifier.create(client.authenticateWithBrowserInteraction(request, 4567, null, null))
.expectNextMatches(accessToken -> token.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testOpenUrl() throws Exception {
try (MockedStatic<Runtime> runtimeMockedStatic = mockStatic(Runtime.class)) {
Runtime runtimeMock = mock(Runtime.class);
runtimeMockedStatic.when(Runtime::getRuntime).thenReturn(runtimeMock);
when(runtimeMock.exec(anyString())).thenReturn(null);
IdentityClient client = new IdentityClientBuilder().clientId("dummy").build();
client.openUrl("https:
verify(runtimeMock).exec(ArgumentMatchers.contains("https:
}
}
@Test
public void testAuthWithManagedIdentityFlow() {
String secret = "SYSTEM-ASSIGNED-CLIENT-SECRET";
String clientId = "SYSTEM-ASSIGNED-CLIENT-ID";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForManagedIdentityFlow(secret, clientId, request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID)
.clientId(clientId)
.clientSecret(secret)
.identityClientOptions(new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM))
.build();
AccessToken token = client.authenticateWithManagedIdentityConfidentialClient(request).block();
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
});
}
/****** mocks ******/
private void mockForManagedIdentityFlow(String secret, String clientId, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.appTokenProvider(any())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.logPii(anyBoolean())).thenReturn(builder);
when(builder.validateAuthority(anyBoolean())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.build()).thenReturn(application);
})) {
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(clientId), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(clientId)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
private void mockForClientSecret(String secret, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.build()).thenReturn(application);
})) {
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
private void mockForClientCertificate(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.build()).thenReturn(application);
})) {
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientCertificate) cred) != null))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> ((IClientCertificate) cred) == null))).thenThrow(new MsalServiceException("Invalid clientCertificate", "InvalidClientCertificate"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientCertificate.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
@Test
public void validateRedaction() {
String s = " WARNING: Could not retrieve credential from local cache for service principal *** under tenant organizations. Trying credential under tenant 72f988bf-86f1-41af-91ab-2d7cd011db47, assuming that is an app credential.\n"
+ " {\n"
+ " \"accessToken\": \"ANACCESSTOKEN\",\n"
+ " \"expiresOn\": \"2023-08-03 12:29:07.000000\",\n"
+ " \"subscription\": \"subscription\",\n"
+ " \"tenant\": \"tenant\",\n"
+ " \"tokenType\": \"Bearer\"\n"
+ " }";
IdentityClient client = new IdentityClientBuilder().clientId("dummy").build();
String redacted = client.redactInfo(s);
assertTrue(redacted.contains("****"));
assertFalse(redacted.contains("accessToken"));
}
private void mockForDeviceCodeFlow(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(DeviceCodeFlowParameters.class))).thenAnswer(invocation -> {
DeviceCodeFlowParameters argument = (DeviceCodeFlowParameters) invocation.getArguments()[0];
if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
if (argument.deviceCodeConsumer() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid device code consumer", "InvalidDeviceCodeConsumer");
});
}
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
});
when(builder.build()).thenReturn(application);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
/**
 * Runs {@code test} with the PEM-certificate confidential-client pipeline fully mocked:
 * CertificateUtil/ClientCredentialFactory statics hand back mock credentials, and any
 * ConfidentialClientApplication built for CLIENT_ID serves tokens from a mock application.
 * Code left byte-identical: the static stubbing order (real builder for CLIENT_ID first,
 * then the failure stub for every other id) is order-sensitive in Mockito.
 */
private void mockForClientPemCertificate(String accessToken, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<CertificateUtil> certificateUtilMock = mockStatic(CertificateUtil.class);
         MockedStatic<ClientCredentialFactory> clientCredentialFactoryMock = mockStatic(ClientCredentialFactory.class);
         MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class);
         MockedConstruction<ConfidentialClientApplication.Builder> builderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
             ConfidentialClientApplication application = mock(ConfidentialClientApplication.class);
             // Return the mocked token only for an exact single-scope match; otherwise fail async.
             when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
                 ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
                 if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                     return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
                 } else {
                     return CompletableFuture.runAsync(() -> {
                         throw new MsalServiceException("Invalid request", "InvalidScopes");
                     });
                 }
             });
             when(builder.build()).thenReturn(application);
             when(builder.authority(any())).thenReturn(builder);
             when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
             when(builder.httpClient(any())).thenReturn(builder);
             when(builder.logPii(anyBoolean())).thenReturn(builder);
         })
    ) {
        // Real builder only for the expected CLIENT_ID; any other id fails fast.
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), any())).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any())).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        // Certificate parsing is stubbed out so no real PEM/key material is needed.
        PrivateKey privateKey = mock(PrivateKey.class);
        IClientCertificate clientCertificate = mock(IClientCertificate.class);
        certificateUtilMock.when(() -> CertificateUtil.privateKeyFromPem(any())).thenReturn(privateKey);
        clientCredentialFactoryMock.when(() -> ClientCredentialFactory.createFromCertificate(any(PrivateKey.class), any(X509Certificate.class))).thenReturn(clientCertificate);
        test.run();
        Assertions.assertNotNull(builderMock);
    }
}
/**
 * Runs {@code test} with IdentityClientBase.getUrl stubbed so the MSI endpoint returns
 * {@code tokenJson} as its HTTP response body.
 * FIX: the identical {@code setRequestMethod} stubbing was repeated three times; a single
 * stub is sufficient (re-stubbing the same void call is a no-op).
 */
private void mockForMSICodeFlow(String tokenJson, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        URL url = mock(URL.class);
        HttpURLConnection huc = mock(HttpURLConnection.class);
        doNothing().when(huc).setRequestMethod(anyString());
        when(url.openConnection()).thenReturn(huc);
        // The stream can only be consumed once, which is fine: each test opens one connection.
        InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
        when(huc.getInputStream()).thenReturn(inputStream);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Runs {@code test} with IdentityClientBase.getUrl stubbed so the Service Fabric identity
 * endpoint (HTTPS) returns {@code tokenJson}; the SSL socket factory setter is a no-op.
 * FIX: the identical {@code setRequestMethod} stubbing was repeated three times; a single
 * stub is sufficient (re-stubbing the same void call is a no-op).
 */
private void mockForServiceFabricCodeFlow(String tokenJson, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        URL url = mock(URL.class);
        HttpsURLConnection huc = mock(HttpsURLConnection.class);
        doNothing().when(huc).setRequestMethod(anyString());
        doNothing().when(huc).setSSLSocketFactory(any());
        when(url.openConnection()).thenReturn(huc);
        InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
        when(huc.getInputStream()).thenReturn(inputStream);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Simulates the Azure Arc managed-identity challenge: the first read always fails
 * (getInputStream throws IOException) and the connection then reports {@code responseCode},
 * letting the caller exercise the Arc challenge-handling path.
 */
private void mockForArcCodeFlow(int responseCode, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        HttpURLConnection connection = mock(HttpURLConnection.class);
        doNothing().when(connection).setRequestMethod(anyString());
        doNothing().when(connection).setRequestProperty(anyString(), anyString());
        doNothing().when(connection).connect();
        when(connection.getInputStream()).thenThrow(new IOException());
        when(connection.getResponseCode()).thenReturn(responseCode);
        URL url = mock(URL.class);
        when(url.openConnection()).thenReturn(connection);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Runs {@code test} with IdentityClientBase.getUrl stubbed so an IMDS-style request
 * receives {@code tokenJson} as its response body.
 * NOTE(review): the {@code endpoint} parameter is currently unused — getUrl is stubbed
 * for any address; confirm whether endpoint-specific matching was intended.
 */
private void mockForIMDSCodeFlow(String endpoint, String tokenJson, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        HttpURLConnection connection = mock(HttpURLConnection.class);
        doNothing().when(connection).setRequestMethod(anyString());
        doNothing().when(connection).setConnectTimeout(anyInt());
        doNothing().when(connection).connect();
        InputStream body = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
        when(connection.getInputStream()).thenReturn(body);
        URL url = mock(URL.class);
        when(url.openConnection()).thenReturn(connection);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Mocks PublicClientApplication so interactive (browser) auth returns {@code token} when the
 * requested scopes match {@code request}, and fails the stubbing otherwise.
 * FIX: the original condition {@code size() != 1 || equals} also returned the token whenever
 * the scope count was not 1; the mock must only succeed on an exact single-scope match.
 */
private void mockForBrowserAuthenticationCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireToken(any(InteractiveRequestParameters.class))).thenAnswer(invocation -> {
            InteractiveRequestParameters argument = (InteractiveRequestParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(token, expiresOn);
            } else {
                throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
            }
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Stubs PublicClientApplication so authorization-code exchange yields {@code token1} only when
 * the scopes match {@code request} and both a redirect URI and an authorization code are present;
 * each failure mode completes with a distinct async MSAL error.
 */
private void mockForAuthorizationCodeFlow(String token1, TokenRequestContext request, OffsetDateTime expiresAt, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock
             = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
                 PublicClientApplication mockApplication = Mockito.mock(PublicClientApplication.class);
                 when(mockApplication.acquireToken(any(AuthorizationCodeParameters.class))).thenAnswer(invocation -> {
                     AuthorizationCodeParameters parameters = (AuthorizationCodeParameters) invocation.getArguments()[0];
                     boolean scopesMatch = parameters.scopes().size() == 1
                         && request.getScopes().get(0).equals(parameters.scopes().iterator().next());
                     if (!scopesMatch) {
                         return CompletableFuture.runAsync(() -> {
                             throw new MsalServiceException("Invalid request", "InvalidScopes");
                         });
                     }
                     if (parameters.redirectUri() == null) {
                         return CompletableFuture.runAsync(() -> {
                             throw new MsalServiceException("Invalid redirect uri", "InvalidAuthorizationCodeRedirectUri");
                         });
                     }
                     if (parameters.authorizationCode() == null) {
                         return CompletableFuture.runAsync(() -> {
                             throw new MsalServiceException("Invalid authorization code", "InvalidAuthorizationCode");
                         });
                     }
                     return TestUtils.getMockAuthenticationResult(token1, expiresAt);
                 });
                 when(builder.authority(any())).thenReturn(builder);
                 when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
                 when(builder.httpClient(any())).thenReturn(builder);
                 when(builder.logPii(anyBoolean())).thenReturn(builder);
                 when(builder.build()).thenReturn(mockApplication);
             })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Mocks PublicClientApplication so username/password auth returns {@code token} when the
 * requested scopes match {@code request}, and fails the stubbing otherwise.
 * FIX: the original condition {@code size() != 1 || equals} also returned the token whenever
 * the scope count was not 1; the mock must only succeed on an exact single-scope match.
 */
private void mockForUsernamePasswordCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireToken(any(UserNamePasswordParameters.class))).thenAnswer(invocation -> {
            UserNamePasswordParameters argument = (UserNamePasswordParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(token, expiresOn);
            } else {
                throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
            }
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Mocks PublicClientApplication so silent (cached-account) token acquisition returns
 * {@code token} when the requested scopes match {@code request}, and fails otherwise.
 * FIX: the original condition {@code size() != 1 || equals} also returned the token whenever
 * the scope count was not 1; the mock must only succeed on an exact single-scope match.
 */
private void mockForUserRefreshTokenFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireTokenSilently(any())).thenAnswer(invocation -> {
            SilentParameters argument = (SilentParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(token, expiresOn);
            } else {
                throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
            }
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
}

class IdentityClientTests {
private static final String TENANT_ID = "contoso.com";
private static final String CLIENT_ID = UUID.randomUUID().toString();
@Test
public void testValidSecret() {
String secret = "secret";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret(secret).build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
@Test
public void testInvalidSecret() {
String secret = "secret";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
try {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret("bad secret").build();
client.authenticateWithConfidentialClient(request).block();
fail();
} catch (MsalServiceException e) {
Assertions.assertEquals("Invalid clientSecret", e.getMessage());
}
});
}
@Test
public void testValidCertificate() {
String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientCertificate(request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.certificatePath(pfxPath).certificatePassword("StrongPass!123").build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testPemCertificate() {
String pemPath;
URL pemUrl = getClass().getClassLoader().getResource("certificate.pem");
if (pemUrl.getPath().contains(":")) {
pemPath = pemUrl.getPath().substring(1);
} else {
pemPath = pemUrl.getPath();
}
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientPemCertificate(accessToken, request, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).certificatePath(pemPath).build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testInvalidCertificatePassword() {
String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientCertificate(request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.certificatePath(pfxPath).certificatePassword("BadPassword").build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.verifyErrorSatisfies(e -> assertTrue(e.getMessage().contains("password was incorrect")));
});
}
@Test
public void testValidDeviceCodeFlow() {
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForDeviceCodeFlow(request, accessToken, expiresOn, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.identityClientOptions(options)
.build();
StepVerifier.create(client.authenticateWithDeviceCode(request, deviceCodeChallenge -> { /* do nothing */ }))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testValidServiceFabricCodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
String thumbprint = "950a2c88d57b5e19ac5119315f9ec199ff3cb823";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint)
.put("IDENTITY_HEADER", secret)
.put("IDENTITY_SERVER_THUMBPRINT", thumbprint));
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.toEpochSecond() + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.SERVICE_FABRIC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint)
.setIdentityHeader(secret)
.setIdentityServerThumbprint(thumbprint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForServiceFabricCodeFlow(tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testValidIdentityEndpointMSICodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint)
.put("IDENTITY_HEADER", secret));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.APP_SERVICE)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint)
.setIdentityHeader(secret))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForMSICodeFlow(tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testInValidIdentityEndpointSecretArcCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.ARC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
Assertions.assertThrows(ClientAuthenticationException.class,
() -> mockForArcCodeFlow(401, () -> {
client.getTokenFromTargetManagedIdentity(request).block();
}));
}
@Test
public void testInValidIdentityEndpointResponseCodeArcCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.ARC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
Assertions.assertThrows(ClientAuthenticationException.class,
() -> mockForArcCodeFlow(200, () -> client.getTokenFromTargetManagedIdentity(request).block()));
}
@Test
public void testValidIMDSCodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("MSI_ENDPOINT", endpoint)
.put("MSI_SECRET", secret));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM)
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForIMDSCodeFlow(IdentityConstants.DEFAULT_IMDS_ENDPOINT, tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testCustomIMDSCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put(Configuration.PROPERTY_AZURE_POD_IDENTITY_TOKEN_URL, endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM)
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForIMDSCodeFlow(endpoint, tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
@Test
public void testAuthorizationCodeFlow() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
String authCode1 = "authCode1";
URI redirectUri = new URI("http:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForAuthorizationCodeFlow(token1, request, expiresAt, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithAuthorizationCode(request, authCode1, redirectUri))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testUserRefreshTokenflow() {
String token1 = "token1";
String token2 = "token1";
TokenRequestContext request2 = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForUserRefreshTokenFlow(token2, request2, expiresAt, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithPublicClientCache(request2, TestUtils.getMockMsalAccount(token1, expiresAt).block()))
.expectNextMatches(accessToken -> token2.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testUsernamePasswordCodeFlow() {
String username = "testuser";
String password = "testpassword";
String token = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForUsernamePasswordCodeFlow(token, request, expiresOn, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithUsernamePassword(request, username, password))
.expectNextMatches(accessToken -> token.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testBrowserAuthenicationCodeFlow() {
String username = "testuser";
String password = "testpassword";
String token = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
mockForBrowserAuthenticationCodeFlow(token, request, expiresOn, () -> {
StepVerifier.create(client.authenticateWithBrowserInteraction(request, 4567, null, null))
.expectNextMatches(accessToken -> token.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
@Test
public void testOpenUrl() throws Exception {
try (MockedStatic<Runtime> runtimeMockedStatic = mockStatic(Runtime.class)) {
Runtime runtimeMock = mock(Runtime.class);
runtimeMockedStatic.when(Runtime::getRuntime).thenReturn(runtimeMock);
when(runtimeMock.exec(anyString())).thenReturn(null);
IdentityClient client = new IdentityClientBuilder().clientId("dummy").build();
client.openUrl("https:
verify(runtimeMock).exec(ArgumentMatchers.contains("https:
}
}
@Test
public void testAuthWithManagedIdentityFlow() {
String secret = "SYSTEM-ASSIGNED-CLIENT-SECRET";
String clientId = "SYSTEM-ASSIGNED-CLIENT-ID";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForManagedIdentityFlow(secret, clientId, request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID)
.clientId(clientId)
.clientSecret(secret)
.identityClientOptions(new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM))
.build();
AccessToken token = client.authenticateWithManagedIdentityConfidentialClient(request).block();
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
});
}
/****** mocks ******/
/**
 * Runs {@code test} with ConfidentialClientApplication statics and builder construction mocked
 * so managed-identity confidential-client auth succeeds only for the given clientId/secret pair.
 * Code left byte-identical: the three ordered static stubbings (real builder for the matching
 * pair, then failure stubs for wrong secret / wrong client id) are order-sensitive in Mockito.
 */
private void mockForManagedIdentityFlow(String secret, String clientId, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
        when(builder.authority(any())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.appTokenProvider(any())).thenReturn(builder);
        ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
        // Token is served only for an exact single-scope match; otherwise an async MSAL failure.
        when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
            ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
            } else {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
        });
        when(builder.logPii(anyBoolean())).thenReturn(builder);
        when(builder.validateAuthority(anyBoolean())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.build()).thenReturn(application);
    })) {
        // Real builder only when both the client id and the secret match; any mismatch throws.
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(clientId), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(clientId)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        test.run();
        Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
    }
}
/**
 * Runs {@code test} with ConfidentialClientApplication mocked so client-secret auth succeeds
 * only for CLIENT_ID with the given {@code secret}; wrong secrets or ids raise MSAL errors.
 * Code left byte-identical: the ordered static stubbings are order-sensitive in Mockito.
 */
private void mockForClientSecret(String secret, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
        ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
        // Token is served only for an exact single-scope match; otherwise an async MSAL failure.
        when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
            ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
            } else {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
        });
        when(builder.build()).thenReturn(application);
    })) {
        // Real builder only when both CLIENT_ID and the secret match; any mismatch throws.
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        test.run();
        Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
    }
}
/**
 * Runs {@code test} with ConfidentialClientApplication mocked so certificate-based auth
 * succeeds only for CLIENT_ID with a non-null client certificate credential.
 * Code left byte-identical: the ordered static stubbings are order-sensitive in Mockito.
 */
private void mockForClientCertificate(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
        ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
        // Token is served only for an exact single-scope match; otherwise an async MSAL failure.
        when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
            ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
            } else {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
        });
        when(builder.build()).thenReturn(application);
    })) {
        // Real builder only for CLIENT_ID with a non-null certificate credential.
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientCertificate) cred) != null))).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> ((IClientCertificate) cred) == null))).thenThrow(new MsalServiceException("Invalid clientCertificate", "InvalidClientCertificate"));
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientCertificate.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        test.run();
        Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
    }
}
// Verifies that IdentityClient.redactInfo scrubs token material from Azure-CLI-style output:
// the token value must be masked with asterisks and the "accessToken" key removed entirely.
@Test
public void validateRedaction() {
String rawCliOutput = " WARNING: Could not retrieve credential from local cache for service principal *** under tenant organizations. Trying credential under tenant 72f988bf-86f1-41af-91ab-2d7cd011db47, assuming that is an app credential.\n"
+ " {\n"
+ " \"accessToken\": \"ANACCESSTOKEN\",\n"
+ " \"expiresOn\": \"2023-08-03 12:29:07.000000\",\n"
+ " \"subscription\": \"subscription\",\n"
+ " \"tenant\": \"tenant\",\n"
+ " \"tokenType\": \"Bearer\"\n"
+ " }";
IdentityClient redactingClient = new IdentityClientBuilder().clientId("dummy").build();
String sanitized = redactingClient.redactInfo(rawCliOutput);
// The secret is masked and its key no longer appears anywhere in the output.
assertTrue(sanitized.contains("****"));
assertFalse(sanitized.contains("accessToken"));
}
// Mocks every constructed PublicClientApplication.Builder so the device-code flow returns the
// given accessToken/expiresOn. Rejects requests with the wrong scope set or a missing
// deviceCodeConsumer via exceptionally-completing futures, mirroring MSAL error behavior.
private void mockForDeviceCodeFlow(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(DeviceCodeFlowParameters.class))).thenAnswer(invocation -> {
DeviceCodeFlowParameters argument = (DeviceCodeFlowParameters) invocation.getArguments()[0];
// Wrong scope count or a non-matching scope -> simulated MSAL failure.
if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
// The device-code flow requires a consumer to surface the user code.
if (argument.deviceCodeConsumer() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid device code consumer", "InvalidDeviceCodeConsumer");
});
}
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
});
when(builder.build()).thenReturn(application);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
// Mocks the full PEM-certificate credential chain: CertificateUtil (PEM -> PrivateKey),
// ClientCredentialFactory (key+cert -> IClientCertificate), and ConfidentialClientApplication,
// so acquireToken returns the given accessToken/expiresOn for the expected scope. Only
// CLIENT_ID is accepted by the static builder stub; other client ids throw.
private void mockForClientPemCertificate(String accessToken, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<CertificateUtil> certificateUtilMock = mockStatic(CertificateUtil.class);
MockedStatic<ClientCredentialFactory> clientCredentialFactoryMock = mockStatic(ClientCredentialFactory.class);
MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class);
MockedConstruction<ConfidentialClientApplication.Builder> builderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
ConfidentialClientApplication application = mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
// Succeed only for exactly the single requested scope.
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})
) {
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), any())).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any())).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
PrivateKey privateKey = mock(PrivateKey.class);
IClientCertificate clientCertificate = mock(IClientCertificate.class);
certificateUtilMock.when(() -> CertificateUtil.privateKeyFromPem(any())).thenReturn(privateKey);
clientCredentialFactoryMock.when(() -> ClientCredentialFactory.createFromCertificate(any(PrivateKey.class), any(X509Certificate.class))).thenReturn(clientCertificate);
test.run();
Assertions.assertNotNull(builderMock);
}
}
// Mocks IdentityClientBase.getUrl(...) so the App Service / MSI flow reads the supplied token
// JSON from a stubbed HttpURLConnection instead of contacting a real endpoint, then runs "test".
// Fix: the original stubbed setRequestMethod(...) three identical times; re-stubbing the same
// void call with doNothing() is redundant, so a single stub is kept.
private void mockForMSICodeFlow(String tokenJson, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpURLConnection huc = mock(HttpURLConnection.class);
doNothing().when(huc).setRequestMethod(anyString());
when(url.openConnection()).thenReturn(huc);
// The credential reads the token payload straight off the connection's input stream.
InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
when(huc.getInputStream()).thenReturn(inputStream);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
// Mocks IdentityClientBase.getUrl(...) with an HTTPS connection (Service Fabric uses TLS with a
// pinned server thumbprint) that serves the supplied token JSON, then runs "test".
// Fix: the original stubbed setRequestMethod(...) three identical times; one doNothing() stub on
// a void method is sufficient, so the duplicates are removed.
private void mockForServiceFabricCodeFlow(String tokenJson, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpsURLConnection huc = mock(HttpsURLConnection.class);
doNothing().when(huc).setRequestMethod(anyString());
// The flow installs a custom socket factory to validate the server thumbprint.
doNothing().when(huc).setSSLSocketFactory(any());
when(url.openConnection()).thenReturn(huc);
InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
when(huc.getInputStream()).thenReturn(inputStream);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
// Mocks the Azure Arc managed-identity challenge flow: the connection always fails to produce a
// body (getInputStream throws IOException) and reports the given HTTP response code, letting
// tests exercise the Arc error-handling paths.
private void mockForArcCodeFlow(int responseCode, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpURLConnection huc = mock(HttpURLConnection.class);
doNothing().when(huc).setRequestMethod(anyString());
doNothing().when(huc).setRequestProperty(anyString(), anyString());
doNothing().when(huc).connect();
when(url.openConnection()).thenReturn(huc);
// No body is ever served; the credential must fall back to inspecting the response code.
when(huc.getInputStream()).thenThrow(new IOException());
when(huc.getResponseCode()).thenReturn(responseCode);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
// Mocks IdentityClientBase.getUrl(...) for the IMDS flow so any endpoint serves the supplied
// token JSON. The "endpoint" parameter is unused by the stubbing (anyString() matches all URLs);
// callers pass it for readability only.
private void mockForIMDSCodeFlow(String endpoint, String tokenJson, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpURLConnection huc = mock(HttpURLConnection.class);
doNothing().when(huc).setRequestMethod(anyString());
// IMDS probing sets a short connect timeout before attempting the call.
doNothing().when(huc).setConnectTimeout(anyInt());
doNothing().when(huc).connect();
when(url.openConnection()).thenReturn(huc);
InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
when(huc.getInputStream()).thenReturn(inputStream);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
// Mocks PublicClientApplication so the interactive-browser flow returns the given token/expiresOn.
// Fix: the original success condition was (scopes().size() != 1 || scope matches), which wrongly
// accepted ANY request whose scope count differed from one. It now succeeds only for exactly the
// single expected scope, matching the rejection pattern used by mockForAuthorizationCodeFlow.
private void mockForBrowserAuthenticationCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(InteractiveRequestParameters.class))).thenAnswer(invocation -> {
InteractiveRequestParameters argument = (InteractiveRequestParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(token, expiresOn);
} else {
throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
// Mocks PublicClientApplication for the authorization-code grant: returns token1/expiresAt when
// the request carries the expected single scope, a redirect URI, and an authorization code;
// otherwise completes exceptionally with a targeted MsalServiceException so tests can tell which
// precondition failed.
private void mockForAuthorizationCodeFlow(String token1, TokenRequestContext request, OffsetDateTime expiresAt, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(AuthorizationCodeParameters.class))).thenAnswer(invocation -> {
AuthorizationCodeParameters argument = (AuthorizationCodeParameters) invocation.getArguments()[0];
if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
if (argument.redirectUri() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid redirect uri", "InvalidAuthorizationCodeRedirectUri");
});
}
if (argument.authorizationCode() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid authorization code", "InvalidAuthorizationCode");
});
}
return TestUtils.getMockAuthenticationResult(token1, expiresAt);
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
// Mocks PublicClientApplication for the username/password (ROPC) flow, returning the given
// token/expiresOn for the expected request.
// Fix: the original success condition was (scopes().size() != 1 || scope matches), which wrongly
// accepted ANY request whose scope count differed from one. It now succeeds only for exactly the
// single expected scope, consistent with mockForAuthorizationCodeFlow's rejection logic.
private void mockForUsernamePasswordCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(UserNamePasswordParameters.class))).thenAnswer(invocation -> {
UserNamePasswordParameters argument = (UserNamePasswordParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(token, expiresOn);
} else {
throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
// Mocks PublicClientApplication's silent (cached/refresh-token) acquisition, returning the given
// token/expiresOn for the expected request.
// Fix: the original success condition was (scopes().size() != 1 || scope matches), which wrongly
// accepted ANY request whose scope count differed from one. It now succeeds only for exactly the
// single expected scope, consistent with mockForAuthorizationCodeFlow's rejection logic.
private void mockForUserRefreshTokenFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireTokenSilently(any())).thenAnswer(invocation -> {
SilentParameters argument = (SilentParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(token, expiresOn);
} else {
throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
} |
nit: reversed actual and expected. | public void testExpiresOnParsingAzureCli() {
Map<String, String> tokenDetails = new HashMap<>();
tokenDetails.put("expiresOn", "2023-10-31 21:59:10.000000");
OffsetDateTime offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1698814750);
tokenDetails.put("expires_on", "1572371520");
offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1572371520);
} | Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1698814750); | public void testExpiresOnParsingAzureCli() {
Map<String, String> tokenDetails = new HashMap<>();
String expiresOn = "2023-10-31 21:59:10.000000";
tokenDetails.put("expiresOn", expiresOn);
OffsetDateTime offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(),
IdentityClientBase.parseExpiresOnTime(expiresOn).toEpochSecond());
tokenDetails.put("expires_on", "1572371520");
offsetDateTime = IdentityClientBase.getTokenExpiryOffsetDateTime(tokenDetails);
Assertions.assertEquals(offsetDateTime.toEpochSecond(), 1572371520);
} | class IdentityClientTests {
// Fixed tenant used by every test; a random client id per run avoids accidental matches
// in the Mockito argument matchers that gate on CLIENT_ID.
private static final String TENANT_ID = "contoso.com";
private static final String CLIENT_ID = UUID.randomUUID().toString();
// Happy path for client-secret auth: with MSAL mocked, the credential returns the expected
// token and expiry for the requested scope.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — likely lost in
// extraction; confirm the full scope URL against the original source.
@Test
public void testValidSecret() {
String secret = "secret";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret(secret).build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Negative path for client-secret auth: a non-matching secret makes the mocked static builder
// throw, and the credential surfaces that MsalServiceException.
// Fix: the @Test annotation was duplicated (it is not a repeatable annotation, so the duplicate
// is a compile error); the extra occurrence is removed.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — likely lost in
// extraction; confirm the full scope URL against the original source.
@Test
public void testInvalidSecret() {
String secret = "secret";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
try {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret("bad secret").build();
client.authenticateWithConfidentialClient(request).block();
fail();
} catch (MsalServiceException e) {
Assertions.assertEquals("Invalid clientSecret", e.getMessage());
}
});
}
// Happy path for PFX client-certificate auth using the bundled keyStore.pfx test fixture.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testValidCertificate() {
String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientCertificate(request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.certificatePath(pfxPath).certificatePassword("StrongPass!123").build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Happy path for PEM client-certificate auth using the bundled certificate.pem fixture.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testPemCertificate() {
String pemPath;
URL pemUrl = getClass().getClassLoader().getResource("certificate.pem");
// On Windows the resource path looks like "/C:/..."; strip the leading slash so the
// credential can open it as a file-system path.
if (pemUrl.getPath().contains(":")) {
pemPath = pemUrl.getPath().substring(1);
} else {
pemPath = pemUrl.getPath();
}
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientPemCertificate(accessToken, request, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID).clientId(CLIENT_ID).certificatePath(pemPath).build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Negative path: a wrong PFX password must fail keystore loading before any token is issued.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testInvalidCertificatePassword() {
String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForClientCertificate(request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.certificatePath(pfxPath).certificatePassword("BadPassword").build();
StepVerifier.create(client.authenticateWithConfidentialClient(request))
.verifyErrorSatisfies(e -> assertTrue(e.getMessage().contains("password was incorrect")));
});
}
// Happy path for the device-code flow with a no-op challenge consumer.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testValidDeviceCodeFlow() {
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForDeviceCodeFlow(request, accessToken, expiresOn, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
.identityClientOptions(options)
.build();
StepVerifier.create(client.authenticateWithDeviceCode(request, deviceCodeChallenge -> { /* do nothing */ }))
.assertNext(token -> {
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Happy path for Service Fabric managed identity: endpoint, header secret, and server
// thumbprint are supplied via both configuration and managed-identity parameters, and the
// mocked HTTPS connection serves an epoch-seconds expires_on payload.
// NOTE(review): the endpoint and addScopes(...) literals below appear truncated at
// "http:"/"https:" — confirm the full URLs against the original source.
@Test
public void testValidServiceFabricCodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
String thumbprint = "950a2c88d57b5e19ac5119315f9ec199ff3cb823";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint)
.put("IDENTITY_HEADER", secret)
.put("IDENTITY_SERVER_THUMBPRINT", thumbprint));
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.toEpochSecond() + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.SERVICE_FABRIC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint)
.setIdentityHeader(secret)
.setIdentityServerThumbprint(thumbprint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForServiceFabricCodeFlow(tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Happy path for App Service managed identity via IDENTITY_ENDPOINT/IDENTITY_HEADER; the
// mocked connection serves an expires_on formatted as "M/d/yyyy H:mm:ss XXX".
// NOTE(review): the endpoint and addScopes(...) literals below appear truncated at
// "http:"/"https:" — confirm the full URLs against the original source.
@Test
public void testValidIdentityEndpointMSICodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint)
.put("IDENTITY_HEADER", secret));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.APP_SERVICE)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint)
.setIdentityHeader(secret))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForMSICodeFlow(tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Negative Arc path: an HTTP 401 from the challenge endpoint must surface as
// ClientAuthenticationException.
// NOTE(review): the endpoint and addScopes(...) literals below appear truncated at
// "http:"/"https:" — confirm the full URLs against the original source.
@Test
public void testInValidIdentityEndpointSecretArcCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.ARC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
Assertions.assertThrows(ClientAuthenticationException.class,
() -> mockForArcCodeFlow(401, () -> {
client.getTokenFromTargetManagedIdentity(request).block();
}));
}
// Negative Arc path: even a 200 response fails here because the mocked connection never yields
// a body (getInputStream throws), so the flow must raise ClientAuthenticationException.
// NOTE(review): the endpoint and addScopes(...) literals below appear truncated at
// "http:"/"https:" — confirm the full URLs against the original source.
@Test
public void testInValidIdentityEndpointResponseCodeArcCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("IDENTITY_ENDPOINT", endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.ARC)
.setManagedIdentityParameters(new ManagedIdentityParameters()
.setIdentityEndpoint(endpoint))
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
Assertions.assertThrows(ClientAuthenticationException.class,
() -> mockForArcCodeFlow(200, () -> client.getTokenFromTargetManagedIdentity(request).block()));
}
// Happy path for VM managed identity against the default IMDS endpoint; MSI_ENDPOINT/MSI_SECRET
// are present in configuration but the VM type routes through IMDS.
// NOTE(review): the endpoint and addScopes(...) literals below appear truncated at
// "http:"/"https:" — confirm the full URLs against the original source.
@Test
public void testValidIMDSCodeFlow() throws Exception {
String endpoint = "http:
String secret = "secret";
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put("MSI_ENDPOINT", endpoint)
.put("MSI_SECRET", secret));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM)
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForIMDSCodeFlow(IdentityConstants.DEFAULT_IMDS_ENDPOINT, tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Same as testValidIMDSCodeFlow but with a custom IMDS endpoint supplied via
// AZURE_POD_IDENTITY_TOKEN_URL (AKS pod identity scenario).
// NOTE(review): the endpoint and addScopes(...) literals below appear truncated at
// "http:"/"https:" — confirm the full URLs against the original source.
@Test
public void testCustomIMDSCodeFlow() throws Exception {
String endpoint = "http:
Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
.put(Configuration.PROPERTY_AZURE_POD_IDENTITY_TOKEN_URL, endpoint));
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
IdentityClientOptions options = new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM)
.setConfiguration(configuration);
IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
mockForIMDSCodeFlow(endpoint, tokenJson, () -> {
StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
.assertNext(token -> {
Assertions.assertEquals("token1", token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
})
.verifyComplete();
});
}
// Happy path for the authorization-code grant: auth code + redirect URI yield the mocked token.
// NOTE(review): the addScopes(...) and URI literals below appear truncated at "https:"/"http:"
// — confirm the full URLs against the original source.
@Test
public void testAuthorizationCodeFlow() throws Exception {
String token1 = "token1";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
String authCode1 = "authCode1";
URI redirectUri = new URI("http:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForAuthorizationCodeFlow(token1, request, expiresAt, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithAuthorizationCode(request, authCode1, redirectUri))
.expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
// Silent-auth path: a cached account (token1) is exchanged for a fresh token via the mocked
// acquireTokenSilently.
// NOTE(review): token2 is initialized to "token1" — presumably intentional (silent refresh of
// the same token), but the name suggests "token2" may have been intended; confirm.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testUserRefreshTokenflow() {
String token1 = "token1";
String token2 = "token1";
TokenRequestContext request2 = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForUserRefreshTokenFlow(token2, request2, expiresAt, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithPublicClientCache(request2, TestUtils.getMockMsalAccount(token1, expiresAt).block()))
.expectNextMatches(accessToken -> token2.equals(accessToken.getToken())
&& expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
// Happy path for the username/password (ROPC) flow.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testUsernamePasswordCodeFlow() {
String username = "testuser";
String password = "testpassword";
String token = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForUsernamePasswordCodeFlow(token, request, expiresOn, () -> {
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
StepVerifier.create(client.authenticateWithUsernamePassword(request, username, password))
.expectNextMatches(accessToken -> token.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
// Happy path for interactive browser auth on port 4567 with the MSAL public client mocked.
// Fix: removed the unused locals "username" and "password" (copied from the ROPC test but never
// referenced here).
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testBrowserAuthenicationCodeFlow() {
String token = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
IdentityClientOptions options = new IdentityClientOptions();
IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
mockForBrowserAuthenticationCodeFlow(token, request, expiresOn, () -> {
StepVerifier.create(client.authenticateWithBrowserInteraction(request, 4567, null, null))
.expectNextMatches(accessToken -> token.equals(accessToken.getToken())
&& expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
.verifyComplete();
});
}
// Verifies openUrl launches the system browser by delegating to Runtime.exec with the URL.
// NOTE(review): the openUrl(...) and contains(...) literals below appear truncated at "https:"
// — confirm the full URLs against the original source.
@Test
public void testOpenUrl() throws Exception {
try (MockedStatic<Runtime> runtimeMockedStatic = mockStatic(Runtime.class)) {
Runtime runtimeMock = mock(Runtime.class);
runtimeMockedStatic.when(Runtime::getRuntime).thenReturn(runtimeMock);
when(runtimeMock.exec(anyString())).thenReturn(null);
IdentityClient client = new IdentityClientBuilder().clientId("dummy").build();
client.openUrl("https:
verify(runtimeMock).exec(ArgumentMatchers.contains("https:
}
}
// Happy path for managed-identity auth through the confidential-client route (VM type), with
// the static ConfidentialClientApplication builder mocked for the system-assigned identity.
// NOTE(review): the addScopes(...) literal below appears truncated at "https:" — confirm the
// full scope URL against the original source.
@Test
public void testAuthWithManagedIdentityFlow() {
String secret = "SYSTEM-ASSIGNED-CLIENT-SECRET";
String clientId = "SYSTEM-ASSIGNED-CLIENT-ID";
String accessToken = "token";
TokenRequestContext request = new TokenRequestContext().addScopes("https:
OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
mockForManagedIdentityFlow(secret, clientId, request, accessToken, expiresOn, () -> {
IdentityClient client = new IdentityClientBuilder()
.tenantId(TENANT_ID)
.clientId(clientId)
.clientSecret(secret)
.identityClientOptions(new IdentityClientOptions()
.setManagedIdentityType(ManagedIdentityType.VM))
.build();
AccessToken token = client.authenticateWithManagedIdentityConfidentialClient(request).block();
Assertions.assertEquals(accessToken, token.getToken());
Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
});
}
/****** mocks ******/
// Mocks ConfidentialClientApplication for the managed-identity confidential-client route:
// accepts only the given clientId with the matching secret (anything else throws a targeted
// MsalServiceException), and returns accessToken/expiresOn for the expected single scope.
// Unlike the plain client-secret mock, this also stubs appTokenProvider/validateAuthority,
// which the managed-identity builder path configures.
private void mockForManagedIdentityFlow(String secret, String clientId, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.appTokenProvider(any())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
// Succeed only for exactly the single requested scope.
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.logPii(anyBoolean())).thenReturn(builder);
when(builder.validateAuthority(anyBoolean())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.build()).thenReturn(application);
})) {
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(clientId), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(clientId)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
/**
 * Installs a mocked MSAL {@link ConfidentialClientApplication} for client-secret tests.
 * <p>
 * Builder construction is intercepted so the built application returns a mock token
 * when the acquired scopes exactly match {@code request}'s single scope, and fails the
 * returned future with an {@code InvalidScopes} MSAL error otherwise. The static
 * {@code ConfidentialClientApplication.builder} stubs enforce that only the expected
 * {@code CLIENT_ID}/{@code secret} pair reaches the real builder; any other id or
 * secret throws immediately.
 * <p>
 * NOTE: the three static {@code when(...)} registrations use overlapping matchers —
 * their registration order determines matching precedence and must not be changed.
 *
 * @param secret      the client secret the mock accepts
 * @param request     token request whose first scope is the only accepted scope
 * @param accessToken token value returned on a matching request
 * @param expiresOn   expiry attached to the returned token
 * @param test        test body to run while the mocks are active
 */
private void mockForClientSecret(String secret, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
        // Builder setters must return the builder so production fluent chains keep working.
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
        ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
        when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
            ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
            } else {
                // Fail asynchronously, mirroring how MSAL surfaces service errors.
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
        });
        when(builder.build()).thenReturn(application);
    })) {
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        test.run();
        Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
    }
}
/**
 * Installs a mocked MSAL {@link ConfidentialClientApplication} for client-certificate tests.
 * <p>
 * Same shape as {@code mockForClientSecret}, but the static builder stubs gate on a
 * non-null {@link IClientCertificate} credential instead of a secret value. Token
 * acquisition succeeds only for {@code request}'s single scope; any other scope set
 * fails the future with an {@code InvalidScopes} MSAL error.
 * <p>
 * NOTE: the static {@code when(...)} registrations use overlapping matchers; their
 * order determines precedence and must be preserved.
 *
 * @param request     token request whose first scope is the only accepted scope
 * @param accessToken token value returned on a matching request
 * @param expiresOn   expiry attached to the returned token
 * @param test        test body to run while the mocks are active
 */
private void mockForClientCertificate(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
        ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
        when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
            ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
            } else {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
        });
        when(builder.build()).thenReturn(application);
    })) {
        // Accept any non-null certificate credential for the known client id; reject otherwise.
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientCertificate) cred) != null))).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> ((IClientCertificate) cred) == null))).thenThrow(new MsalServiceException("Invalid clientCertificate", "InvalidClientCertificate"));
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientCertificate.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        test.run();
        Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
    }
}
@Test
public void validateRedaction() {
    // The raw CLI-style output below embeds an access token; redactInfo must mask
    // it so the token value and its JSON key never reach logs.
    String cliOutput = " WARNING: Could not retrieve credential from local cache for service principal *** under tenant organizations. Trying credential under tenant 72f988bf-86f1-41af-91ab-2d7cd011db47, assuming that is an app credential.\n"
        + " {\n"
        + " \"accessToken\": \"ANACCESSTOKEN\",\n"
        + " \"expiresOn\": \"2023-08-03 12:29:07.000000\",\n"
        + " \"subscription\": \"subscription\",\n"
        + " \"tenant\": \"tenant\",\n"
        + " \"tokenType\": \"Bearer\"\n"
        + " }";
    IdentityClient redactingClient = new IdentityClientBuilder().clientId("dummy").build();
    String sanitized = redactingClient.redactInfo(cliOutput);
    // The redacted form substitutes a mask and drops the accessToken field entirely.
    assertTrue(sanitized.contains("****"));
    assertFalse(sanitized.contains("accessToken"));
}
/**
 * Installs a mocked MSAL {@link PublicClientApplication} for device-code tests.
 * <p>
 * The mocked {@code acquireToken(DeviceCodeFlowParameters)} succeeds only when the
 * request carries exactly {@code request}'s single scope AND a non-null device-code
 * consumer; otherwise the returned future fails with the corresponding MSAL error.
 *
 * @param request     token request whose first scope is the only accepted scope
 * @param accessToken token value returned on a matching request
 * @param expiresOn   expiry attached to the returned token
 * @param test        test body to run while the mock is active
 */
private void mockForDeviceCodeFlow(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        when(builder.authority(any())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireToken(any(DeviceCodeFlowParameters.class))).thenAnswer(invocation -> {
            DeviceCodeFlowParameters argument = (DeviceCodeFlowParameters) invocation.getArguments()[0];
            // Guard 1: scope set must be exactly the expected single scope.
            if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
            // Guard 2: device-code flow requires a challenge consumer callback.
            if (argument.deviceCodeConsumer() == null) {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid device code consumer", "InvalidDeviceCodeConsumer");
                });
            }
            return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
        });
        when(builder.build()).thenReturn(application);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Installs mocks for the PEM-certificate confidential-client path.
 * <p>
 * Beyond the usual builder/application mocks, this also stubs the static
 * {@code CertificateUtil.privateKeyFromPem} and
 * {@code ClientCredentialFactory.createFromCertificate} helpers so no real PEM
 * parsing or key material is needed. Token acquisition succeeds only for
 * {@code request}'s single scope.
 *
 * @param accessToken token value returned on a matching request
 * @param request     token request whose first scope is the only accepted scope
 * @param expiresOn   expiry attached to the returned token
 * @param test        test body to run while the mocks are active
 */
private void mockForClientPemCertificate(String accessToken, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedStatic<CertificateUtil> certificateUtilMock = mockStatic(CertificateUtil.class);
        MockedStatic<ClientCredentialFactory> clientCredentialFactoryMock = mockStatic(ClientCredentialFactory.class);
        MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class);
        MockedConstruction<ConfidentialClientApplication.Builder> builderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
            ConfidentialClientApplication application = mock(ConfidentialClientApplication.class);
            when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
                ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
                if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                    return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
                } else {
                    return CompletableFuture.runAsync(() -> {
                        throw new MsalServiceException("Invalid request", "InvalidScopes");
                    });
                }
            });
            when(builder.build()).thenReturn(application);
            when(builder.authority(any())).thenReturn(builder);
            when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
            when(builder.httpClient(any())).thenReturn(builder);
            when(builder.logPii(anyBoolean())).thenReturn(builder);
        })
    ) {
        // Only the expected client id may reach the real builder.
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), any())).thenCallRealMethod();
        staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any())).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
        PrivateKey privateKey = mock(PrivateKey.class);
        IClientCertificate clientCertificate = mock(IClientCertificate.class);
        certificateUtilMock.when(() -> CertificateUtil.privateKeyFromPem(any())).thenReturn(privateKey);
        clientCredentialFactoryMock.when(() -> ClientCredentialFactory.createFromCertificate(any(PrivateKey.class), any(X509Certificate.class))).thenReturn(clientCertificate);
        test.run();
        Assertions.assertNotNull(builderMock);
    }
}
/**
 * Stubs {@code IdentityClientBase.getUrl(...)} so any managed-identity endpoint
 * "request" reads {@code tokenJson} from a mocked {@link HttpURLConnection} instead
 * of performing real network I/O.
 *
 * @param tokenJson JSON payload the mocked connection serves as its input stream
 * @param test      test body to run while the static mock is active
 * @throws Exception propagated from mock setup (e.g. {@code URL.openConnection} stubbing)
 */
private void mockForMSICodeFlow(String tokenJson, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        URL url = mock(URL.class);
        HttpURLConnection huc = mock(HttpURLConnection.class);
        // Fixed: this stubbing was duplicated three times; one registration suffices.
        doNothing().when(huc).setRequestMethod(anyString());
        when(url.openConnection()).thenReturn(huc);
        InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
        when(huc.getInputStream()).thenReturn(inputStream);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Service Fabric variant of {@code mockForMSICodeFlow}: stubs
 * {@code IdentityClientBase.getUrl(...)} to return a mocked
 * {@link HttpsURLConnection} (Service Fabric uses TLS with a pinned server
 * thumbprint, hence the {@code setSSLSocketFactory} stub) that serves
 * {@code tokenJson} as its input stream.
 *
 * @param tokenJson JSON payload the mocked connection serves as its input stream
 * @param test      test body to run while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForServiceFabricCodeFlow(String tokenJson, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        URL url = mock(URL.class);
        HttpsURLConnection huc = mock(HttpsURLConnection.class);
        // Fixed: the setRequestMethod stubbing was duplicated three times; once suffices.
        doNothing().when(huc).setRequestMethod(anyString());
        doNothing().when(huc).setSSLSocketFactory(any());
        when(url.openConnection()).thenReturn(huc);
        InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
        when(huc.getInputStream()).thenReturn(inputStream);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Simulates the Azure Arc managed-identity challenge flow: reading the mocked
 * connection's input stream throws {@link IOException}, after which the caller
 * inspects the HTTP response code supplied by the test.
 *
 * @param responseCode HTTP status the mocked connection reports
 * @param test         test body to run while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForArcCodeFlow(int responseCode, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> staticUrlMock = mockStatic(IdentityClientBase.class)) {
        HttpURLConnection connection = mock(HttpURLConnection.class);
        doNothing().when(connection).setRequestMethod(anyString());
        doNothing().when(connection).setRequestProperty(anyString(), anyString());
        doNothing().when(connection).connect();
        // First body read fails, forcing the Arc challenge/response path.
        when(connection.getInputStream()).thenThrow(new IOException());
        when(connection.getResponseCode()).thenReturn(responseCode);
        URL endpointUrl = mock(URL.class);
        when(endpointUrl.openConnection()).thenReturn(connection);
        staticUrlMock.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(endpointUrl);
        test.run();
    }
}
/**
 * IMDS variant of the managed-identity mock: stubs
 * {@code IdentityClientBase.getUrl(...)} to return a mocked connection that serves
 * {@code tokenJson}.
 * <p>
 * NOTE(review): the {@code endpoint} parameter is never used — the static stub
 * matches {@code anyString()} — presumably kept for call-site readability; confirm
 * before relying on endpoint-specific behavior.
 *
 * @param endpoint  unused; the stub matches any URL string
 * @param tokenJson JSON payload the mocked connection serves as its input stream
 * @param test      test body to run while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForIMDSCodeFlow(String endpoint, String tokenJson, Runnable test) throws Exception {
    try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
        URL url = mock(URL.class);
        HttpURLConnection huc = mock(HttpURLConnection.class);
        doNothing().when(huc).setRequestMethod(anyString());
        doNothing().when(huc).setConnectTimeout(anyInt());
        doNothing().when(huc).connect();
        when(url.openConnection()).thenReturn(huc);
        InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
        when(huc.getInputStream()).thenReturn(inputStream);
        identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
        test.run();
    }
}
/**
 * Installs a mocked MSAL {@link PublicClientApplication} for interactive-browser tests.
 * The mocked {@code acquireToken(InteractiveRequestParameters)} returns the given
 * token only when the request carries exactly {@code request}'s single scope.
 *
 * @param token     token value returned on a matching request
 * @param request   token request whose first scope is the only accepted scope
 * @param expiresOn expiry attached to the returned token
 * @param test      test body to run while the mock is active
 */
private void mockForBrowserAuthenticationCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireToken(any(InteractiveRequestParameters.class))).thenAnswer(invocation -> {
            InteractiveRequestParameters argument = (InteractiveRequestParameters) invocation.getArguments()[0];
            // Fixed: the original condition (size() != 1 || equals) also returned a token
            // when the scope COUNT was wrong; require an exact single-scope match instead.
            // Callers in this file always pass single-scope requests, so the passing
            // paths of the existing tests are unchanged.
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(token, expiresOn);
            } else {
                throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
            }
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Installs a mocked MSAL {@link PublicClientApplication} for authorization-code tests.
 * <p>
 * The mocked {@code acquireToken(AuthorizationCodeParameters)} validates, in order:
 * the scope set is exactly {@code request}'s single scope, the redirect URI is
 * present, and the authorization code is present. Each failed check fails the
 * returned future with a distinct MSAL error code so tests can tell them apart.
 *
 * @param token1    token value returned on a fully valid request
 * @param request   token request whose first scope is the only accepted scope
 * @param expiresAt expiry attached to the returned token
 * @param test      test body to run while the mock is active
 */
private void mockForAuthorizationCodeFlow(String token1, TokenRequestContext request, OffsetDateTime expiresAt, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireToken(any(AuthorizationCodeParameters.class))).thenAnswer(invocation -> {
            AuthorizationCodeParameters argument = (AuthorizationCodeParameters) invocation.getArguments()[0];
            if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid request", "InvalidScopes");
                });
            }
            if (argument.redirectUri() == null) {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid redirect uri", "InvalidAuthorizationCodeRedirectUri");
                });
            }
            if (argument.authorizationCode() == null) {
                return CompletableFuture.runAsync(() -> {
                    throw new MsalServiceException("Invalid authorization code", "InvalidAuthorizationCode");
                });
            }
            return TestUtils.getMockAuthenticationResult(token1, expiresAt);
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Installs a mocked MSAL {@link PublicClientApplication} for username/password tests.
 * The mocked {@code acquireToken(UserNamePasswordParameters)} returns the given
 * token only when the request carries exactly {@code request}'s single scope.
 *
 * @param token     token value returned on a matching request
 * @param request   token request whose first scope is the only accepted scope
 * @param expiresOn expiry attached to the returned token
 * @param test      test body to run while the mock is active
 */
private void mockForUsernamePasswordCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireToken(any(UserNamePasswordParameters.class))).thenAnswer(invocation -> {
            UserNamePasswordParameters argument = (UserNamePasswordParameters) invocation.getArguments()[0];
            // Fixed: the original condition (size() != 1 || equals) also returned a token
            // when the scope COUNT was wrong; require an exact single-scope match instead.
            // Callers in this file always pass single-scope requests, so existing tests
            // are unaffected on their passing paths.
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(token, expiresOn);
            } else {
                throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
            }
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
/**
 * Installs a mocked MSAL {@link PublicClientApplication} for silent (cached/refresh
 * token) acquisition tests. The mocked {@code acquireTokenSilently} returns the
 * given token only when the request carries exactly {@code request}'s single scope.
 *
 * @param token     token value returned on a matching silent request
 * @param request   token request whose first scope is the only accepted scope
 * @param expiresOn expiry attached to the returned token
 * @param test      test body to run while the mock is active
 */
private void mockForUserRefreshTokenFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
    try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
        PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
        when(application.acquireTokenSilently(any())).thenAnswer(invocation -> {
            SilentParameters argument = (SilentParameters) invocation.getArguments()[0];
            // Fixed: the original condition (size() != 1 || equals) also returned a token
            // when the scope COUNT was wrong; require an exact single-scope match instead.
            // Callers in this file always pass single-scope requests, so existing tests
            // are unaffected on their passing paths.
            if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
                return TestUtils.getMockAuthenticationResult(token, expiresOn);
            } else {
                throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
            }
        });
        when(builder.build()).thenReturn(application);
        when(builder.authority(any())).thenReturn(builder);
        when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
        when(builder.httpClient(any())).thenReturn(builder);
        when(builder.logPii(anyBoolean())).thenReturn(builder);
    })) {
        test.run();
        Assertions.assertNotNull(publicClientApplicationMock);
    }
}
} | class IdentityClientTests {
// Tenant used by every test; the value is arbitrary since all AAD traffic is mocked.
private static final String TENANT_ID = "contoso.com";
// Fresh random client id per test run; the mockFor* helpers match on this exact value.
private static final String CLIENT_ID = UUID.randomUUID().toString();
@Test
public void testValidSecret() {
    // Happy path: a matching client secret yields the mocked access token.
    String secret = "secret";
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
        IdentityClient client = new IdentityClientBuilder()
            .tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret(secret).build();
        StepVerifier.create(client.authenticateWithConfidentialClient(request))
            .assertNext(token -> {
                Assertions.assertEquals(accessToken, token.getToken());
                // Only the seconds field is compared — presumably to tolerate
                // sub-second drift between mock setup and assertion; confirm.
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
/**
 * Verifies that a mismatched client secret surfaces the mocked MSAL failure.
 * <p>
 * Fixed: the {@code @Test} annotation was duplicated; JUnit 5's {@code @Test} is
 * not {@code @Repeatable}, so the duplicate did not compile.
 */
@Test
public void testInvalidSecret() {
    String secret = "secret";
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForClientSecret(secret, request, accessToken, expiresOn, () -> {
        try {
            // The mock registers "secret" as the only valid secret, so "bad secret"
            // must make the static builder stub throw.
            IdentityClient client = new IdentityClientBuilder()
                .tenantId(TENANT_ID).clientId(CLIENT_ID).clientSecret("bad secret").build();
            client.authenticateWithConfidentialClient(request).block();
            fail();
        } catch (MsalServiceException e) {
            Assertions.assertEquals("Invalid clientSecret", e.getMessage());
        }
    });
}
@Test
public void testValidCertificate() {
    // Happy path: a PFX certificate with the correct password yields the mocked token.
    String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForClientCertificate(request, accessToken, expiresOn, () -> {
        IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
            .certificatePath(pfxPath).certificatePassword("StrongPass!123").build();
        StepVerifier.create(client.authenticateWithConfidentialClient(request))
            .assertNext(token -> {
                Assertions.assertEquals(accessToken, token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testPemCertificate() {
    // Happy path: a PEM certificate (parsing fully mocked) yields the mocked token.
    String pemPath;
    URL pemUrl = getClass().getClassLoader().getResource("certificate.pem");
    // On Windows the resource path looks like "/C:/..."; strip the leading slash.
    if (pemUrl.getPath().contains(":")) {
        pemPath = pemUrl.getPath().substring(1);
    } else {
        pemPath = pemUrl.getPath();
    }
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForClientPemCertificate(accessToken, request, expiresOn, () -> {
        IdentityClient client = new IdentityClientBuilder()
            .tenantId(TENANT_ID).clientId(CLIENT_ID).certificatePath(pemPath).build();
        StepVerifier.create(client.authenticateWithConfidentialClient(request))
            .assertNext(token -> {
                Assertions.assertEquals(accessToken, token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testInvalidCertificatePassword() {
    // A wrong PFX password must fail locally while reading the keystore,
    // before any (mocked) token request is made.
    String pfxPath = getClass().getResource("/keyStore.pfx").getPath();
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForClientCertificate(request, accessToken, expiresOn, () -> {
        IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
            .certificatePath(pfxPath).certificatePassword("BadPassword").build();
        StepVerifier.create(client.authenticateWithConfidentialClient(request))
            .verifyErrorSatisfies(e -> assertTrue(e.getMessage().contains("password was incorrect")));
    });
}
@Test
public void testValidDeviceCodeFlow() {
    // Happy path: device-code flow with a (no-op) challenge consumer yields the mocked token.
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForDeviceCodeFlow(request, accessToken, expiresOn, () -> {
        IdentityClientOptions options = new IdentityClientOptions();
        IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID)
            .identityClientOptions(options)
            .build();
        StepVerifier.create(client.authenticateWithDeviceCode(request, deviceCodeChallenge -> { /* do nothing */ }))
            .assertNext(token -> {
                Assertions.assertEquals(accessToken, token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testValidServiceFabricCodeFlow() throws Exception {
    // Managed identity on Service Fabric: endpoint, header secret and server
    // thumbprint come from the environment; the HTTPS call is fully mocked.
    // NOTE(review): the endpoint URL literal is truncated in this copy ("http:) — restore from VCS.
    String endpoint = "http:
    String secret = "secret";
    String thumbprint = "950a2c88d57b5e19ac5119315f9ec199ff3cb823";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
        .put("IDENTITY_ENDPOINT", endpoint)
        .put("IDENTITY_HEADER", secret)
        .put("IDENTITY_SERVER_THUMBPRINT", thumbprint));
    // Service Fabric reports expiry as epoch seconds.
    String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.toEpochSecond() + "\" }";
    IdentityClientOptions options = new IdentityClientOptions()
        .setManagedIdentityType(ManagedIdentityType.SERVICE_FABRIC)
        .setManagedIdentityParameters(new ManagedIdentityParameters()
            .setIdentityEndpoint(endpoint)
            .setIdentityHeader(secret)
            .setIdentityServerThumbprint(thumbprint))
        .setConfiguration(configuration);
    IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
    mockForServiceFabricCodeFlow(tokenJson, () -> {
        StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
            .assertNext(token -> {
                Assertions.assertEquals("token1", token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testValidIdentityEndpointMSICodeFlow() {
    // App Service managed identity via IDENTITY_ENDPOINT/IDENTITY_HEADER;
    // the HTTP call is fully mocked.
    // NOTE(review): the endpoint URL literal is truncated in this copy ("http:) — restore from VCS.
    String endpoint = "http:
    String secret = "secret";
    Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
        .put("IDENTITY_ENDPOINT", endpoint)
        .put("IDENTITY_HEADER", secret));
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    // App Service reports expiry as a formatted local timestamp, not epoch seconds.
    DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
    String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
    IdentityClientOptions options = new IdentityClientOptions()
        .setManagedIdentityType(ManagedIdentityType.APP_SERVICE)
        .setManagedIdentityParameters(new ManagedIdentityParameters()
            .setIdentityEndpoint(endpoint)
            .setIdentityHeader(secret))
        .setConfiguration(configuration);
    IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
    mockForMSICodeFlow(tokenJson, () -> {
        StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
            .assertNext(token -> {
                Assertions.assertEquals("token1", token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testInValidIdentityEndpointSecretArcCodeFlow() {
    // Azure Arc flow: a 401 challenge without a usable secret file must surface
    // as ClientAuthenticationException.
    // NOTE(review): the endpoint URL literal is truncated in this copy ("http:) — restore from VCS.
    String endpoint = "http:
    Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
        .put("IDENTITY_ENDPOINT", endpoint));
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    IdentityClientOptions options = new IdentityClientOptions()
        .setManagedIdentityType(ManagedIdentityType.ARC)
        .setManagedIdentityParameters(new ManagedIdentityParameters()
            .setIdentityEndpoint(endpoint))
        .setConfiguration(configuration);
    IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
    Assertions.assertThrows(ClientAuthenticationException.class,
        () -> mockForArcCodeFlow(401, () -> {
            client.getTokenFromTargetManagedIdentity(request).block();
        }));
}
@Test
public void testInValidIdentityEndpointResponseCodeArcCodeFlow() {
    // Azure Arc flow: the challenge is expected to come back as 401; a 200 after
    // the failed read is unexpected and must also surface as ClientAuthenticationException.
    // NOTE(review): the endpoint URL literal is truncated in this copy ("http:) — restore from VCS.
    String endpoint = "http:
    Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
        .put("IDENTITY_ENDPOINT", endpoint));
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    IdentityClientOptions options = new IdentityClientOptions()
        .setManagedIdentityType(ManagedIdentityType.ARC)
        .setManagedIdentityParameters(new ManagedIdentityParameters()
            .setIdentityEndpoint(endpoint))
        .setConfiguration(configuration);
    IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
    Assertions.assertThrows(ClientAuthenticationException.class,
        () -> mockForArcCodeFlow(200, () -> client.getTokenFromTargetManagedIdentity(request).block()));
}
@Test
public void testValidIMDSCodeFlow() throws Exception {
    // VM managed identity via the default IMDS endpoint; the HTTP call is mocked.
    // NOTE(review): the endpoint URL literal is truncated in this copy ("http:) — restore from VCS.
    String endpoint = "http:
    String secret = "secret";
    Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
        .put("MSI_ENDPOINT", endpoint)
        .put("MSI_SECRET", secret));
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
    String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
    IdentityClientOptions options = new IdentityClientOptions()
        .setManagedIdentityType(ManagedIdentityType.VM)
        .setConfiguration(configuration);
    IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
    mockForIMDSCodeFlow(IdentityConstants.DEFAULT_IMDS_ENDPOINT, tokenJson, () -> {
        StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
            .assertNext(token -> {
                Assertions.assertEquals("token1", token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testCustomIMDSCodeFlow() throws Exception {
    // VM managed identity with a custom token URL supplied via
    // AZURE_POD_IDENTITY_TOKEN_URL; the HTTP call is mocked.
    // NOTE(review): the endpoint URL literal is truncated in this copy ("http:) — restore from VCS.
    String endpoint = "http:
    Configuration configuration = TestUtils.createTestConfiguration(new TestConfigurationSource()
        .put(Configuration.PROPERTY_AZURE_POD_IDENTITY_TOKEN_URL, endpoint));
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    DateTimeFormatter dtf = DateTimeFormatter.ofPattern("M/d/yyyy H:mm:ss XXX");
    String tokenJson = "{ \"access_token\" : \"token1\", \"expires_on\" : \"" + expiresOn.format(dtf) + "\" }";
    IdentityClientOptions options = new IdentityClientOptions()
        .setManagedIdentityType(ManagedIdentityType.VM)
        .setConfiguration(configuration);
    IdentityClient client = new IdentityClientBuilder().identityClientOptions(options).build();
    mockForIMDSCodeFlow(endpoint, tokenJson, () -> {
        StepVerifier.create(client.getTokenFromTargetManagedIdentity(request))
            .assertNext(token -> {
                Assertions.assertEquals("token1", token.getToken());
                Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
            })
            .verifyComplete();
    });
}
@Test
public void testAuthorizationCodeFlow() throws Exception {
    // Happy path: a valid auth code plus redirect URI yields the mocked token.
    String token1 = "token1";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    String authCode1 = "authCode1";
    // NOTE(review): the redirect URI literal is truncated in this copy ("http:) — restore from VCS.
    URI redirectUri = new URI("http:
    OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForAuthorizationCodeFlow(token1, request, expiresAt, () -> {
        IdentityClientOptions options = new IdentityClientOptions();
        IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
        StepVerifier.create(client.authenticateWithAuthorizationCode(request, authCode1, redirectUri))
            .expectNextMatches(accessToken -> token1.equals(accessToken.getToken())
                && expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
            .verifyComplete();
    });
}
@Test
public void testUserRefreshTokenflow() {
    // Silent re-acquisition: a cached MSAL account returns the mocked token
    // without user interaction.
    String token1 = "token1";
    String token2 = "token1";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request2 = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresAt = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForUserRefreshTokenFlow(token2, request2, expiresAt, () -> {
        IdentityClientOptions options = new IdentityClientOptions();
        IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
        StepVerifier.create(client.authenticateWithPublicClientCache(request2, TestUtils.getMockMsalAccount(token1, expiresAt).block()))
            .expectNextMatches(accessToken -> token2.equals(accessToken.getToken())
                && expiresAt.getSecond() == accessToken.getExpiresAt().getSecond())
            .verifyComplete();
    });
}
@Test
public void testUsernamePasswordCodeFlow() {
    // Happy path: username/password (ROPC) flow yields the mocked token.
    String username = "testuser";
    String password = "testpassword";
    String token = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForUsernamePasswordCodeFlow(token, request, expiresOn, () -> {
        IdentityClientOptions options = new IdentityClientOptions();
        IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
        StepVerifier.create(client.authenticateWithUsernamePassword(request, username, password))
            .expectNextMatches(accessToken -> token.equals(accessToken.getToken())
                && expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
            .verifyComplete();
    });
}
@Test
public void testBrowserAuthenicationCodeFlow() {
    // Happy path: interactive browser flow (fully mocked, port 4567) yields the mocked token.
    // NOTE(review): username/password locals are unused in this test — presumably copy-paste
    // from the ROPC test; confirm before removing.
    String username = "testuser";
    String password = "testpassword";
    String token = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    IdentityClientOptions options = new IdentityClientOptions();
    IdentityClient client = new IdentityClientBuilder().tenantId(TENANT_ID).clientId(CLIENT_ID).identityClientOptions(options).build();
    mockForBrowserAuthenticationCodeFlow(token, request, expiresOn, () -> {
        StepVerifier.create(client.authenticateWithBrowserInteraction(request, 4567, null, null))
            .expectNextMatches(accessToken -> token.equals(accessToken.getToken())
                && expiresOn.getSecond() == accessToken.getExpiresAt().getSecond())
            .verifyComplete();
    });
}
@Test
public void testOpenUrl() throws Exception {
    // Verifies openUrl shells out via Runtime.exec with the target URL;
    // Runtime is statically mocked so no browser is actually launched.
    try (MockedStatic<Runtime> runtimeMockedStatic = mockStatic(Runtime.class)) {
        Runtime runtimeMock = mock(Runtime.class);
        runtimeMockedStatic.when(Runtime::getRuntime).thenReturn(runtimeMock);
        when(runtimeMock.exec(anyString())).thenReturn(null);
        IdentityClient client = new IdentityClientBuilder().clientId("dummy").build();
        // NOTE(review): the URL literals below are truncated in this copy ("https:) — restore from VCS.
        client.openUrl("https:
        verify(runtimeMock).exec(ArgumentMatchers.contains("https:
    }
}
@Test
public void testAuthWithManagedIdentityFlow() {
    // Managed-identity confidential-client path (VM type) returns the mocked token.
    String secret = "SYSTEM-ASSIGNED-CLIENT-SECRET";
    String clientId = "SYSTEM-ASSIGNED-CLIENT-ID";
    String accessToken = "token";
    // NOTE(review): the scope URL literal is truncated in this copy ("https:) — restore from VCS.
    TokenRequestContext request = new TokenRequestContext().addScopes("https:
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plusHours(1);
    mockForManagedIdentityFlow(secret, clientId, request, accessToken, expiresOn, () -> {
        IdentityClient client = new IdentityClientBuilder()
            .tenantId(TENANT_ID)
            .clientId(clientId)
            .clientSecret(secret)
            .identityClientOptions(new IdentityClientOptions()
                .setManagedIdentityType(ManagedIdentityType.VM))
            .build();
        AccessToken token = client.authenticateWithManagedIdentityConfidentialClient(request).block();
        Assertions.assertEquals(accessToken, token.getToken());
        Assertions.assertEquals(expiresOn.getSecond(), token.getExpiresAt().getSecond());
    });
}
/****** mocks ******/
/**
 * Installs MSAL ConfidentialClientApplication mocks for the managed-identity flow and runs
 * {@code test} while they are active.
 * <p>
 * acquireToken succeeds with a canned result only for an exact single-scope match against
 * {@code request}; any other scope set resolves to a future that throws. The static
 * {@code builder(...)} stubs enforce that exactly the expected clientId/secret pair reaches MSAL.
 *
 * @param secret expected client secret; any other secret makes builder() throw
 * @param clientId expected client id; any other id makes builder() throw
 * @param request token request whose first scope is the only accepted scope
 * @param accessToken token string the mocked flow returns
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForManagedIdentityFlow(String secret, String clientId, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
// Builder setters are fluent no-ops returning the mock itself.
when(builder.authority(any())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.appTokenProvider(any())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
// Failure is surfaced asynchronously, mirroring MSAL's CompletableFuture contract.
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.logPii(anyBoolean())).thenReturn(builder);
when(builder.validateAuthority(anyBoolean())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.build()).thenReturn(application);
})) {
// Stubbing order matters: most-specific match first, then the two rejection cases.
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(clientId), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(clientId)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
/**
 * Installs ConfidentialClientApplication mocks for the client-secret flow and runs {@code test}.
 * Only the exact (CLIENT_ID, secret) pair reaches a real builder; wrong secrets or ids make
 * builder() throw, and acquireToken only succeeds on an exact single-scope match.
 *
 * @param secret expected client secret
 * @param request token request whose first scope is the only accepted scope
 * @param accessToken token string the mocked flow returns
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForClientSecret(String secret, TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
// Fluent builder setters return the mock itself.
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
// Failure surfaced asynchronously, matching MSAL's CompletableFuture contract.
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.build()).thenReturn(application);
})) {
// Most-specific stub first, then the two rejection cases.
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientSecret) cred).clientSecret().equals(secret)))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> !((IClientSecret) cred).clientSecret().equals(secret)))).thenThrow(new MsalServiceException("Invalid clientSecret", "InvalidClientSecret"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientSecret.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
/**
 * Installs ConfidentialClientApplication mocks for the client-certificate flow and runs
 * {@code test}. Any non-null certificate credential paired with CLIENT_ID is accepted;
 * acquireToken only succeeds on an exact single-scope match.
 *
 * @param request token request whose first scope is the only accepted scope
 * @param accessToken token string the mocked flow returns
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForClientCertificate(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class); MockedConstruction<ConfidentialClientApplication.Builder> confidentialClientApplicationBuilderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
ConfidentialClientApplication application = Mockito.mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.build()).thenReturn(application);
})) {
// NOTE(review): the cast in `((IClientCertificate) cred) != null` adds nothing over a plain
// null check — any non-null credential passes; presumably intentional leniency, confirm.
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), argThat(cred -> ((IClientCertificate) cred) != null))).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(anyString(), argThat(cred -> ((IClientCertificate) cred) == null))).thenThrow(new MsalServiceException("Invalid clientCertificate", "InvalidClientCertificate"));
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any(IClientCertificate.class))).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
test.run();
Assertions.assertNotNull(confidentialClientApplicationBuilderMock);
}
}
@Test
// Verifies IdentityClient.redactInfo masks access tokens from Azure CLI-style JSON output:
// the redacted text must contain the mask and must no longer mention "accessToken".
public void validateRedaction() {
String s = " WARNING: Could not retrieve credential from local cache for service principal *** under tenant organizations. Trying credential under tenant 72f988bf-86f1-41af-91ab-2d7cd011db47, assuming that is an app credential.\n"
+ " {\n"
+ " \"accessToken\": \"ANACCESSTOKEN\",\n"
+ " \"expiresOn\": \"2023-08-03 12:29:07.000000\",\n"
+ " \"subscription\": \"subscription\",\n"
+ " \"tenant\": \"tenant\",\n"
+ " \"tokenType\": \"Bearer\"\n"
+ " }";
IdentityClient client = new IdentityClientBuilder().clientId("dummy").build();
String redacted = client.redactInfo(s);
assertTrue(redacted.contains("****"));
assertFalse(redacted.contains("accessToken"));
}
/**
 * Installs PublicClientApplication mocks for the device-code flow and runs {@code test}.
 * acquireToken fails asynchronously unless the request carries exactly the expected scope
 * AND a non-null device-code consumer; otherwise it returns the canned result.
 *
 * @param request token request whose first scope is the only accepted scope
 * @param accessToken token string the mocked flow returns
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForDeviceCodeFlow(TokenRequestContext request, String accessToken, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
when(builder.authority(any())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(DeviceCodeFlowParameters.class))).thenAnswer(invocation -> {
DeviceCodeFlowParameters argument = (DeviceCodeFlowParameters) invocation.getArguments()[0];
// Guard 1: scope set must be exactly the single expected scope.
if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
// Guard 2: the device-code callback must be wired through.
if (argument.deviceCodeConsumer() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid device code consumer", "InvalidDeviceCodeConsumer");
});
}
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
});
when(builder.build()).thenReturn(application);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
/**
 * Installs mocks for the PEM-certificate flow: CertificateUtil/ClientCredentialFactory statics
 * return mock key/certificate objects, and the ConfidentialClientApplication built for CLIENT_ID
 * yields a canned result for an exact single-scope match.
 *
 * @param accessToken token string the mocked flow returns
 * @param request token request whose first scope is the only accepted scope
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForClientPemCertificate(String accessToken, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedStatic<CertificateUtil> certificateUtilMock = mockStatic(CertificateUtil.class);
MockedStatic<ClientCredentialFactory> clientCredentialFactoryMock = mockStatic(ClientCredentialFactory.class);
MockedStatic<ConfidentialClientApplication> staticConfidentialClientApplicationMock = mockStatic(ConfidentialClientApplication.class);
MockedConstruction<ConfidentialClientApplication.Builder> builderMock = mockConstruction(ConfidentialClientApplication.Builder.class, (builder, context) -> {
ConfidentialClientApplication application = mock(ConfidentialClientApplication.class);
when(application.acquireToken(any(ClientCredentialParameters.class))).thenAnswer(invocation -> {
ClientCredentialParameters argument = (ClientCredentialParameters) invocation.getArguments()[0];
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(accessToken, expiresOn);
} else {
// Failure surfaced asynchronously, matching MSAL's CompletableFuture contract.
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})
) {
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(eq(CLIENT_ID), any())).thenCallRealMethod();
staticConfidentialClientApplicationMock.when(() -> ConfidentialClientApplication.builder(AdditionalMatchers.not(eq(CLIENT_ID)), any())).thenThrow(new MsalServiceException("Invalid CLIENT_ID", "InvalidClientId"));
// The PEM parsing helpers are short-circuited so no real key material is needed.
PrivateKey privateKey = mock(PrivateKey.class);
IClientCertificate clientCertificate = mock(IClientCertificate.class);
certificateUtilMock.when(() -> CertificateUtil.privateKeyFromPem(any())).thenReturn(privateKey);
clientCredentialFactoryMock.when(() -> ClientCredentialFactory.createFromCertificate(any(PrivateKey.class), any(X509Certificate.class))).thenReturn(clientCertificate);
test.run();
Assertions.assertNotNull(builderMock);
}
}
/**
 * Mocks the MSI code flow's HTTP layer: {@code IdentityClientBase.getUrl(...)} returns a mocked
 * {@link URL} whose connection serves {@code tokenJson}, then runs {@code test}.
 *
 * @param tokenJson JSON payload the mocked endpoint returns
 * @param test assertions executed while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForMSICodeFlow(String tokenJson, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpURLConnection huc = mock(HttpURLConnection.class);
// Original stubbed setRequestMethod with three identical lines; one stubbing is sufficient.
doNothing().when(huc).setRequestMethod(anyString());
when(url.openConnection()).thenReturn(huc);
InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
when(huc.getInputStream()).thenReturn(inputStream);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
/**
 * Mocks the Service Fabric flow's HTTPS layer: {@code IdentityClientBase.getUrl(...)} returns a
 * mocked {@link URL} whose {@link HttpsURLConnection} serves {@code tokenJson}, then runs
 * {@code test}.
 *
 * @param tokenJson JSON payload the mocked endpoint returns
 * @param test assertions executed while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForServiceFabricCodeFlow(String tokenJson, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpsURLConnection huc = mock(HttpsURLConnection.class);
// Original stubbed setRequestMethod with three identical lines; one stubbing is sufficient.
doNothing().when(huc).setRequestMethod(anyString());
// Service Fabric sets a custom trust store, so the SSL socket factory setter must be a no-op.
doNothing().when(huc).setSSLSocketFactory(any());
when(url.openConnection()).thenReturn(huc);
InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
when(huc.getInputStream()).thenReturn(inputStream);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
/**
 * Mocks the Azure Arc flow's challenge handshake: the connection always throws from
 * getInputStream (Arc's first request is expected to fail) and reports {@code responseCode},
 * letting tests drive the WWW-Authenticate challenge handling.
 *
 * @param responseCode HTTP status the mocked connection reports (e.g. 401 for the challenge)
 * @param test assertions executed while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForArcCodeFlow(int responseCode, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpURLConnection huc = mock(HttpURLConnection.class);
doNothing().when(huc).setRequestMethod(anyString());
doNothing().when(huc).setRequestProperty(anyString(), anyString());
doNothing().when(huc).connect();
when(url.openConnection()).thenReturn(huc);
// First read always fails; the client is expected to inspect the response code instead.
when(huc.getInputStream()).thenThrow(new IOException());
when(huc.getResponseCode()).thenReturn(responseCode);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
/**
 * Mocks the IMDS flow's HTTP layer so the probed endpoint serves {@code tokenJson}.
 *
 * @param endpoint expected IMDS endpoint (NOTE(review): currently unused by the stubbing —
 *                 getUrl matches anyString(); confirm whether endpoint filtering was intended)
 * @param tokenJson JSON payload the mocked endpoint returns
 * @param test assertions executed while the static mock is active
 * @throws Exception propagated from mock setup
 */
private void mockForIMDSCodeFlow(String endpoint, String tokenJson, Runnable test) throws Exception {
try (MockedStatic<IdentityClientBase> identityClientMockedStatic = mockStatic(IdentityClientBase.class)) {
URL url = mock(URL.class);
HttpURLConnection huc = mock(HttpURLConnection.class);
doNothing().when(huc).setRequestMethod(anyString());
doNothing().when(huc).setConnectTimeout(anyInt());
doNothing().when(huc).connect();
when(url.openConnection()).thenReturn(huc);
InputStream inputStream = new ByteArrayInputStream(tokenJson.getBytes(Charset.defaultCharset()));
when(huc.getInputStream()).thenReturn(inputStream);
identityClientMockedStatic.when(() -> IdentityClientBase.getUrl(anyString())).thenReturn(url);
test.run();
}
}
/**
 * Installs PublicClientApplication mocks for the interactive-browser flow and runs {@code test}.
 * acquireToken returns the canned result only for an exact single-scope match against
 * {@code request}; anything else raises, mirroring mockForDeviceCodeFlow's validation.
 *
 * @param token token string the mocked flow returns
 * @param request token request whose first scope is the only accepted scope
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForBrowserAuthenticationCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(InteractiveRequestParameters.class))).thenAnswer(invocation -> {
InteractiveRequestParameters argument = (InteractiveRequestParameters) invocation.getArguments()[0];
// BUGFIX: the original condition `size() != 1 || equals(...)` accepted any request whose
// scope count was not exactly one. The stub should only succeed on a single, matching scope.
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(token, expiresOn);
} else {
throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
/**
 * Installs PublicClientApplication mocks for the authorization-code flow and runs {@code test}.
 * acquireToken validates scope, redirect URI and authorization code in turn, failing
 * asynchronously with a distinct error code for each missing piece.
 *
 * @param token1 token string the mocked flow returns
 * @param request token request whose first scope is the only accepted scope
 * @param expiresAt expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForAuthorizationCodeFlow(String token1, TokenRequestContext request, OffsetDateTime expiresAt, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(AuthorizationCodeParameters.class))).thenAnswer(invocation -> {
AuthorizationCodeParameters argument = (AuthorizationCodeParameters) invocation.getArguments()[0];
// Guard 1: exactly the expected single scope.
if (argument.scopes().size() != 1 || !request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid request", "InvalidScopes");
});
}
// Guard 2: redirect URI must be wired through.
if (argument.redirectUri() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid redirect uri", "InvalidAuthorizationCodeRedirectUri");
});
}
// Guard 3: the authorization code itself must be present.
if (argument.authorizationCode() == null) {
return CompletableFuture.runAsync(() -> {
throw new MsalServiceException("Invalid authorization code", "InvalidAuthorizationCode");
});
}
return TestUtils.getMockAuthenticationResult(token1, expiresAt);
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
/**
 * Installs PublicClientApplication mocks for the username/password flow and runs {@code test}.
 * acquireToken returns the canned result only for an exact single-scope match against
 * {@code request}; anything else raises, mirroring mockForDeviceCodeFlow's validation.
 *
 * @param token token string the mocked flow returns
 * @param request token request whose first scope is the only accepted scope
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForUsernamePasswordCodeFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireToken(any(UserNamePasswordParameters.class))).thenAnswer(invocation -> {
UserNamePasswordParameters argument = (UserNamePasswordParameters) invocation.getArguments()[0];
// BUGFIX: the original condition `size() != 1 || equals(...)` accepted any request whose
// scope count was not exactly one. The stub should only succeed on a single, matching scope.
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(token, expiresOn);
} else {
throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
/**
 * Installs PublicClientApplication mocks for the silent (refresh-token) flow and runs
 * {@code test}. acquireTokenSilently returns the canned result only for an exact single-scope
 * match against {@code request}; anything else raises.
 *
 * @param token token string the mocked flow returns
 * @param request token request whose first scope is the only accepted scope
 * @param expiresOn expiry stamped on the mocked result
 * @param test assertions executed while the mocks are in scope
 */
private void mockForUserRefreshTokenFlow(String token, TokenRequestContext request, OffsetDateTime expiresOn, Runnable test) {
try (MockedConstruction<PublicClientApplication.Builder> publicClientApplicationMock = mockConstruction(PublicClientApplication.Builder.class, (builder, context) -> {
PublicClientApplication application = Mockito.mock(PublicClientApplication.class);
when(application.acquireTokenSilently(any())).thenAnswer(invocation -> {
SilentParameters argument = (SilentParameters) invocation.getArguments()[0];
// BUGFIX: the original condition `size() != 1 || equals(...)` accepted any request whose
// scope count was not exactly one. The stub should only succeed on a single, matching scope.
if (argument.scopes().size() == 1 && request.getScopes().get(0).equals(argument.scopes().iterator().next())) {
return TestUtils.getMockAuthenticationResult(token, expiresOn);
} else {
throw new InvalidUseOfMatchersException(String.format("Argument %s does not match", (Object) argument));
}
});
when(builder.build()).thenReturn(application);
when(builder.authority(any())).thenReturn(builder);
when(builder.instanceDiscovery(anyBoolean())).thenReturn(builder);
when(builder.httpClient(any())).thenReturn(builder);
when(builder.logPii(anyBoolean())).thenReturn(builder);
})) {
test.run();
Assertions.assertNotNull(publicClientApplicationMock);
}
}
} |
Instead of a timeout here should we just apply a `@Timeout` to the test class? | private String runCommand(String... args) {
// Runs an external process (stderr merged into stdout), waits up to 5 minutes, and returns
// the captured output. Throws RuntimeException on timeout, I/O error, or interruption.
try {
StringBuilder command = new StringBuilder();
for (String arg : args) {
command.append(arg).append(" ");
}
// NOTE(review): this logs the full argv — callers pass secrets (e.g. `az login -p <secret>`);
// confirm test logs are not retained.
System.out.println("Running command: " + command);
ProcessBuilder processBuilder = new ProcessBuilder(args);
processBuilder.redirectErrorStream(true);
Process process = processBuilder.start();
boolean finished = process.waitFor(300, TimeUnit.SECONDS);
if (!finished) {
process.destroy();
// NOTE(review): output produced before the timeout is discarded, hampering diagnosis.
throw new RuntimeException("Process execution timeout");
}
StringBuilder output = new StringBuilder();
// NOTE(review): output is read only after waitFor — a child producing more output than the
// OS pipe buffer holds will block and trip the timeout; consider reading while waiting.
try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
String line;
while ((line = reader.readLine()) != null) {
output.append(line).append(System.lineSeparator());
}
}
System.out.println("Output:" + System.lineSeparator() + output);
return output.toString();
} catch (IOException | InterruptedException e) {
// NOTE(review): on InterruptedException the interrupt flag is not restored
// (Thread.currentThread().interrupt()).
e.printStackTrace();
throw new RuntimeException(e);
}
} | boolean finished = process.waitFor(300, TimeUnit.SECONDS); | private String runCommand(String... args) {
// Runs an external process (stderr merged into stdout), waits for exit with no time bound,
// and returns the captured output. Throws RuntimeException on I/O error or interruption.
try {
StringBuilder command = new StringBuilder();
for (String arg : args) {
command.append(arg).append(" ");
}
// NOTE(review): logs the full argv — callers pass secrets on the command line; confirm
// test logs are not retained.
System.out.println("Running command: " + command);
ProcessBuilder processBuilder = new ProcessBuilder(args);
processBuilder.redirectErrorStream(true);
Process process = processBuilder.start();
// NOTE(review): waiting before draining stdout can deadlock if the child fills the OS pipe
// buffer; reading to EOF first (or concurrently) avoids this.
process.waitFor();
StringBuilder output = new StringBuilder();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
String line;
while ((line = reader.readLine()) != null) {
output.append(line).append(System.lineSeparator());
}
}
System.out.println("Output:" + System.lineSeparator() + output);
return output.toString();
} catch (IOException | InterruptedException e) {
// NOTE(review): interrupt flag not restored before rethrowing.
e.printStackTrace();
throw new RuntimeException(e);
}
} | class LiveManagedIdentityTests extends TestBase {
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
// Live smoke test: the deployed Azure Function endpoint must answer HTTP 200.
public void testManagedIdentityFuncDeployment() {
HttpClient client = HttpClient.createDefault();
// NOTE(review): URL literal appears truncated by extraction ("https:"); confirm against source.
String functionUrl = "https:
HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
// NOTE(review): block() may return null on an empty Mono, which would NPE inside the
// try-with-resources body rather than fail with a clear message.
try (HttpResponse httpResponse = client.send(request).block()) {
if (httpResponse.getStatusCode() != 200) {
fail("Failed to get response from function app");
}
}
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
// Live smoke test: the deployed Azure Web App endpoint must answer HTTP 200.
public void testManagedIdentityWebAppDeployment() {
HttpClient client = HttpClient.createDefault();
// NOTE(review): URL literal appears truncated by extraction ("https:"); confirm against source.
String functionUrl = "https:
ClientLogger logger = new ClientLogger(LiveManagedIdentityTests.class);
logger.log(LogLevel.INFORMATIONAL, () -> "webappURL: " + functionUrl);
HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
// NOTE(review): block() may return null, which would NPE here instead of a clear failure.
try (HttpResponse httpResponse = client.send(request).block()) {
if (httpResponse.getStatusCode() != 200) {
fail("Failed to get response from web app");
}
}
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
// Live test: logs into az CLI, fetches AKS credentials, and executes the identity-test jar
// inside the first pod, asserting the managed-identity token retrieval message.
public void testManagedIdentityAksDeployment() {
String os = System.getProperty("os.name");
System.out.println("OS: " + os);
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String spClientId = configuration.get("IDENTITY_CLIENT_ID");
String secret = configuration.get("IDENTITY_CLIENT_SECRET");
String tenantId = configuration.get("IDENTITY_TENANT_ID");
String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
String aksCluster = configuration.get("IDENTITY_AKS_CLUSTER_NAME");
String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
String podName = configuration.get("IDENTITY_AKS_POD_NAME");
// `where` on Windows, `which` elsewhere (the method is Linux-gated but keeps both).
String pathCommand = os.contains("Windows") ? "where" : "which";
String azPath = runCommand(pathCommand, "az").trim();
String kubectlPath = runCommand(pathCommand, "kubectl").trim();
// NOTE(review): the SP secret is passed as a command-line arg and echoed by runCommand's
// "Running command" log — confirm CI log retention policy.
runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
runCommand(azPath, "account", "set", "--subscription", subscriptionId);
runCommand(azPath, "aks", "get-credentials", "--resource-group", resourceGroup, "--name", aksCluster,
"--overwrite-existing");
String podOutput = runCommand(kubectlPath, "get", "pods", "-o", "jsonpath='{.items[0].metadata.name}'");
assertTrue(podOutput.contains(podName), "Pod name not found in the output");
String output = runCommand(kubectlPath, "exec", "-it", podName, "--", "java", "-jar", "/identity-test.jar");
Assertions.assertTrue(output.contains("Successfully retrieved managed identity tokens"), "Failed to get response from AKS");
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
// Live test: logs into az CLI, then runs the identity-test jar on the target VM via
// `az vm run-command`, asserting the managed-identity token retrieval message.
public void testManagedIdentityVmDeployment() {
String os = System.getProperty("os.name");
System.out.println("OS: " + os);
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String spClientId = configuration.get("IDENTITY_CLIENT_ID");
String secret = configuration.get("IDENTITY_CLIENT_SECRET");
String tenantId = configuration.get("IDENTITY_TENANT_ID");
String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
String vmName = configuration.get("IDENTITY_VM_NAME");
String storageAcccountName = configuration.get("IDENTITY_STORAGE_NAME_1");
boolean isWindows = os.contains("Windows");
// On Windows, `where az` returns several entries; pick the .cmd shim.
String azPath = runCommand(isWindows ? "where" : "which", "az").trim();
azPath = isWindows ? extractAzCmdPath(azPath) : azPath;
// NOTE(review): the SP secret is passed as a command-line arg and echoed by runCommand's
// "Running command" log — confirm CI log retention policy.
runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
runCommand(azPath, "account", "set", "--subscription", subscriptionId);
// NOTE(review): blob URL format string appears truncated by extraction ("https:"); confirm.
String vmBlob = String.format("https:
String script = String.format("curl '%s' -o ./testfile.jar && java -jar ./testfile.jar", vmBlob);
String output = runCommand(azPath, "vm", "run-command", "invoke", "-n", vmName, "-g", resourceGroup,
"--command-id", "RunShellScript", "--scripts", script);
Assertions.assertTrue(output.contains("Successfully retrieved managed identity tokens"),
"Failed to get response from VM");
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
// Live test: a multi-tenant client-secret credential must mint a Graph-scoped token.
public void callGraphWithClientSecret() {
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String multiTenantId = configuration.get("AZURE_IDENTITY_MULTI_TENANT_TENANT_ID");
String multiClientId = configuration.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_ID");
String multiClientSecret = configuration.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_SECRET");
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
.tenantId(multiTenantId)
.clientId(multiClientId)
.clientSecret(multiClientSecret)
.build();
// NOTE(review): scope literal appears truncated by extraction ("https:"); confirm.
AccessToken accessToken = credential
.getTokenSync(new TokenRequestContext().addScopes("https:
// NOTE(review): `assertTrue(x != null, ...)` could be Assertions.assertNotNull for clarity.
Assertions.assertTrue(accessToken != null, "Failed to get access token");
}
/**
 * Picks the {@code .cmd} entry out of a multi-line {@code where az} result on Windows.
 * Falls back to the raw input when no line ends with {@code .cmd}.
 *
 * @param output raw multi-line output of the path lookup
 * @return the first line ending in ".cmd", or {@code output} unchanged if none exists
 */
private String extractAzCmdPath(String output) {
// Split on either Windows or Unix line endings and scan for the .cmd shim.
for (String candidate : output.split("\\r?\\n")) {
if (candidate.endsWith(".cmd")) {
return candidate;
}
}
// No .cmd entry found — hand back the input untouched.
return output;
}
} | class LiveManagedIdentityTests extends TestBase {
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
// Live smoke test: the deployed Azure Function endpoint must answer HTTP 200.
public void testManagedIdentityFuncDeployment() {
HttpClient client = HttpClient.createDefault();
// NOTE(review): URL literal appears truncated by extraction ("https:"); confirm against source.
String functionUrl = "https:
HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
// NOTE(review): block() may return null, which would NPE here instead of a clear failure.
try (HttpResponse httpResponse = client.send(request).block()) {
if (httpResponse.getStatusCode() != 200) {
fail("Failed to get response from function app");
}
}
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
// Live smoke test: the deployed Azure Web App endpoint must answer HTTP 200.
public void testManagedIdentityWebAppDeployment() {
HttpClient client = HttpClient.createDefault();
// NOTE(review): URL literal appears truncated by extraction ("https:"); confirm against source.
String functionUrl = "https:
ClientLogger logger = new ClientLogger(LiveManagedIdentityTests.class);
logger.log(LogLevel.INFORMATIONAL, () -> "webappURL: " + functionUrl);
HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
// NOTE(review): block() may return null, which would NPE here instead of a clear failure.
try (HttpResponse httpResponse = client.send(request).block()) {
if (httpResponse.getStatusCode() != 200) {
fail("Failed to get response from web app");
}
}
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
@Timeout(value = 15, unit = TimeUnit.MINUTES)
// Live test: logs into az CLI, fetches AKS credentials, and executes the identity-test jar
// inside the first pod, asserting the managed-identity token retrieval message.
public void testManagedIdentityAksDeployment() {
String os = System.getProperty("os.name");
System.out.println("OS: " + os);
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String spClientId = configuration.get("IDENTITY_CLIENT_ID");
String secret = configuration.get("IDENTITY_CLIENT_SECRET");
String tenantId = configuration.get("IDENTITY_TENANT_ID");
String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
String aksCluster = configuration.get("IDENTITY_AKS_CLUSTER_NAME");
String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
String podName = configuration.get("IDENTITY_AKS_POD_NAME");
// `where` on Windows, `which` elsewhere (the method is Linux-gated but keeps both).
String pathCommand = os.contains("Windows") ? "where" : "which";
String azPath = runCommand(pathCommand, "az").trim();
String kubectlPath = runCommand(pathCommand, "kubectl").trim();
// NOTE(review): the SP secret is passed as a command-line arg and echoed by runCommand's
// "Running command" log — confirm CI log retention policy.
runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
runCommand(azPath, "account", "set", "--subscription", subscriptionId);
runCommand(azPath, "aks", "get-credentials", "--resource-group", resourceGroup, "--name", aksCluster,
"--overwrite-existing");
String podOutput = runCommand(kubectlPath, "get", "pods", "-o", "jsonpath='{.items[0].metadata.name}'");
assertTrue(podOutput.contains(podName), "Pod name not found in the output")
String output = runCommand(kubectlPath, "exec", "-it", podName, "--", "java", "-jar", "/identity-test.jar");
assertTrue(output.contains("Successfully retrieved managed identity tokens"), "Failed to get response from AKS");
}
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
@Timeout(value = 15, unit = TimeUnit.MINUTES)
// Live test: logs into az CLI, builds a short-lived SAS URL for the test jar, then runs it on
// the target VM via `az vm run-command`, asserting the managed-identity token message.
public void testManagedIdentityVmDeployment() {
String os = System.getProperty("os.name");
System.out.println("OS: " + os);
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String spClientId = configuration.get("IDENTITY_CLIENT_ID");
String secret = configuration.get("IDENTITY_CLIENT_SECRET");
String tenantId = configuration.get("IDENTITY_TENANT_ID");
String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
String vmName = configuration.get("IDENTITY_VM_NAME");
String storageAcccountName = configuration.get("IDENTITY_STORAGE_NAME_1");
boolean isWindows = os.contains("Windows");
// On Windows, `where az` returns several entries; pick the .cmd shim.
String azPath = runCommand(isWindows ? "where" : "which", "az").trim();
azPath = isWindows ? extractAzCmdPath(azPath) : azPath;
// NOTE(review): secret, storage key and SAS token all pass through runCommand's argv logging,
// and the script (with SAS) is printed below — confirm CI log retention policy.
runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
runCommand(azPath, "account", "set", "--subscription", subscriptionId);
String storageKey = runCommand(azPath, "storage", "account", "keys", "list", "--account-name", storageAcccountName,
"--resource-group", resourceGroup, "--query", "[0].value", "--output", "tsv").trim();
// SAS valid for two days, read-only, HTTPS-only.
String expiry = LocalDate.now().plusDays(2).format(DateTimeFormatter.ofPattern("yyyy-MM-dd"))
String sasToken = runCommand(azPath, "storage", "blob", "generate-sas", "--account-name", storageAcccountName,
"--account-key", "\"" + storageKey + "\"", "--container-name", "vmcontainer", "--name", "testfile.jar", "--permissions", "r",
"--expiry", expiry, "--https-only", "--output", "tsv").trim();
// NOTE(review): blob URL format string appears truncated by extraction ("https:"); confirm.
String vmBlob = String.format("https:
String script = String.format("curl \'%s\' -o ./testfile.jar && java -jar ./testfile.jar", vmBlob);
System.out.println("Script: " + script);
String output = runCommand(azPath, "vm", "run-command", "invoke", "-n", vmName, "-g", resourceGroup,
"--command-id", "RunShellScript", "--scripts", script);
assertTrue(output.contains("Successfully retrieved managed identity tokens"),
"Failed to get response from VM");
}
/**
 * Live-only test: verifies that a multi-tenant app registration can acquire an
 * access token through the client-secret flow. Reads the tenant/client/secret
 * triplet from global configuration and asserts a non-null token is returned.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void callGraphWithClientSecret() {
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String multiTenantId = configuration.get("AZURE_IDENTITY_MULTI_TENANT_TENANT_ID");
String multiClientId = configuration.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_ID");
String multiClientSecret = configuration.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_SECRET");
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
.tenantId(multiTenantId)
.clientId(multiClientId)
.clientSecret(multiClientSecret)
.build();
// NOTE(review): the scope literal is truncated in this dump ("https: ...);
// presumably the Graph default scope — confirm against the original file.
AccessToken accessToken = credential
.getTokenSync(new TokenRequestContext().addScopes("https:
assertTrue(accessToken != null, "Failed to get access token");
}
/**
 * Picks the {@code .cmd} shim out of a multi-line {@code where az} result on
 * Windows; falls back to the raw output when no .cmd entry is present.
 */
private String extractAzCmdPath(String output) {
    for (String candidate : output.split("\\r?\\n")) {
        if (candidate.endsWith(".cmd")) {
            return candidate;
        }
    }
    return output;
}
} |
Added it | private String runCommand(String... args) {
try {
StringBuilder command = new StringBuilder();
for (String arg : args) {
command.append(arg).append(" ");
}
System.out.println("Running command: " + command);
ProcessBuilder processBuilder = new ProcessBuilder(args);
processBuilder.redirectErrorStream(true);
Process process = processBuilder.start();
boolean finished = process.waitFor(300, TimeUnit.SECONDS);
if (!finished) {
process.destroy();
throw new RuntimeException("Process execution timeout");
}
StringBuilder output = new StringBuilder();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
String line;
while ((line = reader.readLine()) != null) {
output.append(line).append(System.lineSeparator());
}
}
System.out.println("Output:" + System.lineSeparator() + output);
return output.toString();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
} | boolean finished = process.waitFor(300, TimeUnit.SECONDS); | private String runCommand(String... args) {
try {
    // Echo the command line for the live-test logs.
    StringBuilder command = new StringBuilder();
    for (String arg : args) {
        command.append(arg).append(" ");
    }
    System.out.println("Running command: " + command);
    ProcessBuilder processBuilder = new ProcessBuilder(args);
    processBuilder.redirectErrorStream(true); // merge stderr into stdout
    Process process = processBuilder.start();
    // Drain the output BEFORE waiting: calling waitFor() first (the previous
    // order) deadlocks when the child fills the OS pipe buffer. readLine()
    // returns null at EOF, i.e. once the process closes its output on exit.
    StringBuilder output = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
        String line;
        while ((line = reader.readLine()) != null) {
            output.append(line).append(System.lineSeparator());
        }
    }
    process.waitFor(); // reap the exit status; EOF above means it already finished
    System.out.println("Output:" + System.lineSeparator() + output);
    return output.toString();
} catch (IOException e) {
    throw new RuntimeException(e);
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve interrupt status for the caller
    throw new RuntimeException(e);
}
} | class LiveManagedIdentityTests extends TestBase {
/**
 * Live test: calls the deployed Azure Function endpoint, which exercises
 * managed identity inside the Function and returns 200 on success.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void testManagedIdentityFuncDeployment() {
    HttpClient client = HttpClient.createDefault();
    String functionUrl = "https:
    HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
    try (HttpResponse httpResponse = client.send(request).block()) {
        // block() can complete empty and yield null; guard so the test fails
        // with a clear message instead of a NullPointerException.
        if (httpResponse == null || httpResponse.getStatusCode() != 200) {
            fail("Failed to get response from function app");
        }
    }
}
/**
 * Live test: calls the deployed App Service web app, which exercises managed
 * identity and returns 200 on success.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void testManagedIdentityWebAppDeployment() {
    HttpClient client = HttpClient.createDefault();
    String functionUrl = "https:
    ClientLogger logger = new ClientLogger(LiveManagedIdentityTests.class);
    logger.log(LogLevel.INFORMATIONAL, () -> "webappURL: " + functionUrl);
    HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
    try (HttpResponse httpResponse = client.send(request).block()) {
        // Null-guard: block() may complete empty; fail cleanly rather than NPE.
        if (httpResponse == null || httpResponse.getStatusCode() != 200) {
            fail("Failed to get response from web app");
        }
    }
}
/**
 * Live test (Linux agents only): logs in with a service principal, fetches AKS
 * credentials, and runs the packaged identity test inside the cluster's pod,
 * asserting that managed identity tokens were retrieved.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
public void testManagedIdentityAksDeployment() {
    String os = System.getProperty("os.name");
    System.out.println("OS: " + os);
    Configuration configuration = Configuration.getGlobalConfiguration().clone();
    String spClientId = configuration.get("IDENTITY_CLIENT_ID");
    String secret = configuration.get("IDENTITY_CLIENT_SECRET");
    String tenantId = configuration.get("IDENTITY_TENANT_ID");
    String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
    String aksCluster = configuration.get("IDENTITY_AKS_CLUSTER_NAME");
    String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
    String podName = configuration.get("IDENTITY_AKS_POD_NAME");
    String pathCommand = os.contains("Windows") ? "where" : "which";
    String azPath = runCommand(pathCommand, "az").trim();
    String kubectlPath = runCommand(pathCommand, "kubectl").trim();
    runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
    runCommand(azPath, "account", "set", "--subscription", subscriptionId);
    runCommand(azPath, "aks", "get-credentials", "--resource-group", resourceGroup, "--name", aksCluster,
        "--overwrite-existing");
    String podOutput = runCommand(kubectlPath, "get", "pods", "-o", "jsonpath='{.items[0].metadata.name}'");
    assertTrue(podOutput.contains(podName), "Pod name not found in the output");
    String output = runCommand(kubectlPath, "exec", "-it", podName, "--", "java", "-jar", "/identity-test.jar");
    // Consistency: use the statically imported assertTrue like the rest of the class.
    assertTrue(output.contains("Successfully retrieved managed identity tokens"), "Failed to get response from AKS");
}
/**
 * Live test (Linux agents only): downloads the identity test jar onto the VM
 * via `az vm run-command` and asserts that managed identity tokens were
 * retrieved from inside the VM.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
public void testManagedIdentityVmDeployment() {
    String os = System.getProperty("os.name");
    System.out.println("OS: " + os);
    Configuration configuration = Configuration.getGlobalConfiguration().clone();
    String spClientId = configuration.get("IDENTITY_CLIENT_ID");
    String secret = configuration.get("IDENTITY_CLIENT_SECRET");
    String tenantId = configuration.get("IDENTITY_TENANT_ID");
    String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
    String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
    String vmName = configuration.get("IDENTITY_VM_NAME");
    String storageAcccountName = configuration.get("IDENTITY_STORAGE_NAME_1");
    boolean isWindows = os.contains("Windows");
    String azPath = runCommand(isWindows ? "where" : "which", "az").trim();
    azPath = isWindows ? extractAzCmdPath(azPath) : azPath;
    runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
    runCommand(azPath, "account", "set", "--subscription", subscriptionId);
    // NOTE(review): the blob URL literal is truncated in this dump — confirm
    // the full format string against the original file.
    String vmBlob = String.format("https:
    String script = String.format("curl '%s' -o ./testfile.jar && java -jar ./testfile.jar", vmBlob);
    String output = runCommand(azPath, "vm", "run-command", "invoke", "-n", vmName, "-g", resourceGroup,
        "--command-id", "RunShellScript", "--scripts", script);
    // Consistency: use the statically imported assertTrue like the rest of the class.
    assertTrue(output.contains("Successfully retrieved managed identity tokens"),
        "Failed to get response from VM");
}
/**
 * Live test: acquires a Graph token through the client-secret flow using a
 * multi-tenant app registration read from global configuration.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void callGraphWithClientSecret() {
    Configuration configuration = Configuration.getGlobalConfiguration().clone();
    String multiTenantId = configuration.get("AZURE_IDENTITY_MULTI_TENANT_TENANT_ID");
    String multiClientId = configuration.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_ID");
    String multiClientSecret = configuration.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_SECRET");
    ClientSecretCredential credential = new ClientSecretCredentialBuilder()
        .tenantId(multiTenantId)
        .clientId(multiClientId)
        .clientSecret(multiClientSecret)
        .build();
    AccessToken accessToken = credential
        .getTokenSync(new TokenRequestContext().addScopes("https:
    // Consistency: use the statically imported assertTrue like the rest of the class.
    assertTrue(accessToken != null, "Failed to get access token");
}
/**
 * Scans the multi-line output of Windows' {@code where az} and returns the
 * {@code .cmd} entry; returns the raw output unchanged when none exists.
 */
private String extractAzCmdPath(String output) {
    String[] entries = output.split("\\r?\\n");
    for (String entry : entries) {
        if (entry.endsWith(".cmd")) {
            return entry;
        }
    }
    return output;
}
} | class LiveManagedIdentityTests extends TestBase {
/**
 * Live test: calls the deployed Azure Function endpoint, which exercises
 * managed identity inside the Function and returns 200 on success.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void testManagedIdentityFuncDeployment() {
    HttpClient client = HttpClient.createDefault();
    String functionUrl = "https:
    HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
    try (HttpResponse httpResponse = client.send(request).block()) {
        // block() can complete empty and yield null; guard so the test fails
        // with a clear message instead of a NullPointerException.
        if (httpResponse == null || httpResponse.getStatusCode() != 200) {
            fail("Failed to get response from function app");
        }
    }
}
/**
 * Live test: calls the deployed App Service web app, which exercises managed
 * identity and returns 200 on success.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void testManagedIdentityWebAppDeployment() {
    HttpClient client = HttpClient.createDefault();
    String functionUrl = "https:
    ClientLogger logger = new ClientLogger(LiveManagedIdentityTests.class);
    logger.log(LogLevel.INFORMATIONAL, () -> "webappURL: " + functionUrl);
    HttpRequest request = new HttpRequest(HttpMethod.GET, functionUrl);
    try (HttpResponse httpResponse = client.send(request).block()) {
        // Null-guard: block() may complete empty; fail cleanly rather than NPE.
        if (httpResponse == null || httpResponse.getStatusCode() != 200) {
            fail("Failed to get response from web app");
        }
    }
}
/**
 * Live test (Linux agents only, 15-minute budget): logs in with a service
 * principal, fetches AKS credentials, and runs the packaged identity test
 * inside the cluster's pod.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
@Timeout(value = 15, unit = TimeUnit.MINUTES)
public void testManagedIdentityAksDeployment() {
String os = System.getProperty("os.name");
System.out.println("OS: " + os);
// All cluster coordinates come from environment configuration.
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String spClientId = configuration.get("IDENTITY_CLIENT_ID");
String secret = configuration.get("IDENTITY_CLIENT_SECRET");
String tenantId = configuration.get("IDENTITY_TENANT_ID");
String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
String aksCluster = configuration.get("IDENTITY_AKS_CLUSTER_NAME");
String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
String podName = configuration.get("IDENTITY_AKS_POD_NAME");
String pathCommand = os.contains("Windows") ? "where" : "which";
String azPath = runCommand(pathCommand, "az").trim();
String kubectlPath = runCommand(pathCommand, "kubectl").trim();
runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
runCommand(azPath, "account", "set", "--subscription", subscriptionId);
// Writes kubeconfig so the following kubectl calls target this cluster.
runCommand(azPath, "aks", "get-credentials", "--resource-group", resourceGroup, "--name", aksCluster,
"--overwrite-existing");
String podOutput = runCommand(kubectlPath, "get", "pods", "-o", "jsonpath='{.items[0].metadata.name}'");
assertTrue(podOutput.contains(podName), "Pod name not found in the output");
// The jar inside the pod performs the actual managed-identity token acquisition.
String output = runCommand(kubectlPath, "exec", "-it", podName, "--", "java", "-jar", "/identity-test.jar");
assertTrue(output.contains("Successfully retrieved managed identity tokens"), "Failed to get response from AKS");
}
/**
 * Live test (Linux agents only, 15-minute budget): generates a read-only SAS
 * for the test jar blob, downloads and runs it on the VM via
 * `az vm run-command`, and asserts managed identity tokens were retrieved.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
@EnabledIfSystemProperty(named = "os.name", matches = "Linux")
@Timeout(value = 15, unit = TimeUnit.MINUTES)
public void testManagedIdentityVmDeployment() {
String os = System.getProperty("os.name");
System.out.println("OS: " + os);
Configuration configuration = Configuration.getGlobalConfiguration().clone();
String spClientId = configuration.get("IDENTITY_CLIENT_ID");
String secret = configuration.get("IDENTITY_CLIENT_SECRET");
String tenantId = configuration.get("IDENTITY_TENANT_ID");
String resourceGroup = configuration.get("IDENTITY_RESOURCE_GROUP");
String subscriptionId = configuration.get("IDENTITY_SUBSCRIPTION_ID");
String vmName = configuration.get("IDENTITY_VM_NAME");
String storageAcccountName = configuration.get("IDENTITY_STORAGE_NAME_1");
boolean isWindows = os.contains("Windows");
String azPath = runCommand(isWindows ? "where" : "which", "az").trim();
azPath = isWindows ? extractAzCmdPath(azPath) : azPath;
runCommand(azPath, "login", "--service-principal", "-u", spClientId, "-p", secret, "--tenant", tenantId);
runCommand(azPath, "account", "set", "--subscription", subscriptionId);
// Account key is needed to sign the SAS below.
String storageKey = runCommand(azPath, "storage", "account", "keys", "list", "--account-name", storageAcccountName,
"--resource-group", resourceGroup, "--query", "[0].value", "--output", "tsv").trim();
String expiry = LocalDate.now().plusDays(2).format(DateTimeFormatter.ofPattern("yyyy-MM-dd"));
// Read-only, HTTPS-only SAS for the test jar, valid for two days.
String sasToken = runCommand(azPath, "storage", "blob", "generate-sas", "--account-name", storageAcccountName,
"--account-key", "\"" + storageKey + "\"", "--container-name", "vmcontainer", "--name", "testfile.jar", "--permissions", "r",
"--expiry", expiry, "--https-only", "--output", "tsv").trim();
// NOTE(review): the format string is truncated in this dump; presumably it
// embeds sasToken in the blob URL — confirm against the original file.
String vmBlob = String.format("https:
String script = String.format("curl \'%s\' -o ./testfile.jar && java -jar ./testfile.jar", vmBlob);
System.out.println("Script: " + script);
String output = runCommand(azPath, "vm", "run-command", "invoke", "-n", vmName, "-g", resourceGroup,
"--command-id", "RunShellScript", "--scripts", script);
assertTrue(output.contains("Successfully retrieved managed identity tokens"),
"Failed to get response from VM");
}
/**
 * Live test: acquires a Graph token through the client-secret flow using a
 * multi-tenant app registration read from global configuration.
 */
@Test
@EnabledIfEnvironmentVariable(named = "AZURE_TEST_MODE", matches = "LIVE")
public void callGraphWithClientSecret() {
    Configuration config = Configuration.getGlobalConfiguration().clone();
    String tenant = config.get("AZURE_IDENTITY_MULTI_TENANT_TENANT_ID");
    String client = config.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_ID");
    String clientSecret = config.get("AZURE_IDENTITY_MULTI_TENANT_CLIENT_SECRET");
    ClientSecretCredential credential = new ClientSecretCredentialBuilder()
        .tenantId(tenant)
        .clientId(client)
        .clientSecret(clientSecret)
        .build();
    AccessToken accessToken = credential
        .getTokenSync(new TokenRequestContext().addScopes("https:
    assertTrue(accessToken != null, "Failed to get access token");
}
/**
 * Returns the {@code .cmd} line from a multi-line {@code where az} result
 * (Windows), or the unmodified output when no such line is found.
 */
private String extractAzCmdPath(String output) {
    for (String line : output.split("\\r?\\n")) {
        if (line.endsWith(".cmd")) {
            return line;
        }
    }
    return output;
}
} |
wondering if we need this non-null check given the earlier null check & return? | private static OpenTelemetry init() {
// Telemetry is opt-in: without a connection string every OTel call is a no-op.
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
    return OpenTelemetry.noop();
}
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
// The early return above guarantees a non-null connection string here, so the
// former redundant 'if (applicationInsightsConnectionString != null)' wrapper
// has been removed.
new AzureMonitorExporterBuilder()
    .connectionString(applicationInsightsConnectionString)
    .install(sdkBuilder);
String instanceId = System.getenv("CONTAINER_NAME");
OpenTelemetry otel = sdkBuilder
    // Tag telemetry with the container name (when present) for per-instance filtering.
    .addResourceCustomizer((resource, props) ->
        instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
    // Force-sample spans explicitly marked with SAMPLE_IN_ATTRIBUTE; defer to
    // the configured sampler for everything else.
    .addSamplerCustomizer((sampler, props) -> new Sampler() {
        @Override
        public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
            if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
                return SamplingResult.recordAndSample();
            }
            return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
        }
        @Override
        public String getDescription() {
            return sampler.getDescription();
        }
    })
    .setResultAsGlobal()
    .build()
    .getOpenTelemetrySdk();
// JVM runtime metric observers plus the log-appender bridge.
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
} | if (applicationInsightsConnectionString != null) { | private static OpenTelemetry init() {
// Telemetry is opt-in: without a connection string every OTel call is a no-op.
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
return OpenTelemetry.noop();
}
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
// Routes all exported telemetry to Application Insights.
new AzureMonitorExporterBuilder()
.connectionString(applicationInsightsConnectionString)
.install(sdkBuilder);
String instanceId = System.getenv("CONTAINER_NAME");
OpenTelemetry otel = sdkBuilder
// Tag telemetry with the container name (when present) for per-instance filtering.
.addResourceCustomizer((resource, props) ->
instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
// Force-sample spans explicitly marked with SAMPLE_IN_ATTRIBUTE; defer to the
// configured sampler for everything else.
.addSamplerCustomizer((sampler, props) -> new Sampler() {
@Override
public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
return SamplingResult.recordAndSample();
}
return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
}
@Override
public String getDescription() {
return sampler.getDescription();
}
})
// Registers the SDK globally; must happen before build().
.setResultAsGlobal()
.build()
.getOpenTelemetrySdk();
// JVM runtime metric observers plus the log-appender bridge.
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
} | class TelemetryHelper {
// Per-scenario OTel instruments; tracer/meter/logger are all named after the scenario class.
private final Tracer tracer;
private final ClientLogger logger;
// Process-wide OpenTelemetry instance, built once in the static initializer below.
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
// Spans carrying this attribute are always sampled (see the sampler customizer in init()).
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
// Per-run duration in seconds, tagged with scenario name and outcome.
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
// Enable Reactor scheduler metrics and build the global OTel instance exactly once.
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
 * Creates an instance of telemetry helper.
 * @param scenarioClass the scenario class; its name labels all spans, metrics and logs
 */
public TelemetryHelper(Class<?> scenarioClass) {
    this.scenarioName = scenarioClass.getName();
    this.tracer = OTEL.getTracer(scenarioName);
    this.meter = OTEL.getMeter(scenarioName);
    this.logger = new ClientLogger(scenarioName);
    this.runDuration = meter.histogramBuilder("test.run.duration")
        .setUnit("s")
        .build();
    this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
    this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
    this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
    this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
    this.errorCounter = meter.counterBuilder("test.run.errors").build();
}
/**
 * Re-initializes logging to OTel — necessary in Spring applications, which
 * reconfigure logging after this class is loaded. (Azure Monitor export and
 * JVM metrics are set up once in init(); the previously dangling Javadoc
 * describing that has been folded into this comment.)
 */
public void initLogging() {
    OpenTelemetryAppender.install(OTEL);
}
/**
 * Instruments a runnable: records its duration along with the outcome
 * (success, error, or cancellation) and wraps it in a span.
 * @param oneRun the runnable to instrument
 * @param method the method name
 * @param partitionId the partition id
 */
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // getMessage() may be null (e.g. a bare InterruptedException); check it
        // before contains() so an NPE can't mask the original failure.
        String message = e.getMessage();
        if ((message != null && message.contains("Timeout on blocking read"))
            || e instanceof InterruptedException || e instanceof TimeoutException) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        throw e;
    }
}
/**
 * Instruments a Mono: records its duration along with the status
 * (success, error, cancellation).
 * @param runAsync the mono to instrument
 * @param method the method name
 * @return the instrumented mono
 */
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
// Propagate the current OTel span to Azure SDK tracing via the Reactor context.
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
// Errors are already recorded above; swallow them so the pipeline completes.
.onErrorResume(e -> Mono.empty());
}
});
}
// Records a successful run's duration and closes the span.
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
// Records a cancelled run: duration tagged "cancelled", span marked as error.
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
// Records a failed run: unwraps reactive exceptions, marks the span failed,
// bumps the error counter, and records duration tagged with the error type.
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
// NOTE(review): getMessage() may be null here — confirm setStatus tolerates that.
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
 * Records an event representing the start of a test along with test options.
 * @param span the span to record attributes on
 * @param options test parameters
 */
public void recordOptions(Span span, ScenarioOptions options) {
// Resolve the messaging library version from its package metadata; falls back
// to "unknown"/"null" when it cannot be determined.
String libraryPackageVersion = "unknown";
try {
Class<?> libraryPackage = Class.forName(EventHubClientBuilder.class.getName());
libraryPackageVersion = libraryPackage.getPackage().getImplementationVersion();
if (libraryPackageVersion == null) {
libraryPackageVersion = "null";
}
} catch (ClassNotFoundException e) {
logger.atWarning()
.addKeyValue("class", EventHubClientBuilder.class.getName())
.log("Could not determine azure-eventhubs-messaging version, EventHubClientBuilder class is not found", e);
}
span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
// NOTE(review): HOSTNAME may be unset in some environments — confirm a null
// attribute value is acceptable here.
span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
// Starts a span that is always sampled (see the sampler customizer in init()).
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
// Records an error by reason string, without an exception object.
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
// Records an error from a throwable, unwrapping reactive composite exceptions first.
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
// Central sink: bumps the error counter and logs the failure with context.
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
AttributesBuilder attributesBuilder = Attributes.builder()
.put(AttributeKey.stringKey("error.type"), errorReason)
.put(AttributeKey.stringKey("method"), method);
if (partitionId != null) {
attributesBuilder.put(AttributeKey.stringKey("partitionId"), partitionId);
}
errorCounter.add(1, attributesBuilder.build());
LoggingEventBuilder log = logger.atError()
.addKeyValue("partitionId", partitionId)
.addKeyValue("error.type", errorReason)
.addKeyValue("method", method);
if (ex != null) {
log.log("test error", ex);
} else {
log.log("test error");
}
}
// Counts a partition-close event, tagged with partition, reason and processor.
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
closedPartitionCounter.add(1, attributes);
}
// Counts a partition-initialized event, tagged with partition and processor.
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
initializedPartitionCounter.add(1, attributes);
}
// Elapsed seconds since start, clamped to be non-negative.
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
} | class TelemetryHelper {
// Per-scenario OTel instruments; tracer/meter/logger are all named after the scenario class.
private final Tracer tracer;
private final ClientLogger logger;
// Process-wide OpenTelemetry instance, built once in the static initializer below.
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
// Spans carrying this attribute are always sampled (see the sampler customizer in init()).
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
// Per-run duration in seconds, tagged with scenario name and outcome.
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
// Enable Reactor scheduler metrics and build the global OTel instance exactly once.
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
 * Creates an instance of telemetry helper.
 * @param scenarioClass the scenario class; its name labels all spans, metrics and logs
 */
public TelemetryHelper(Class<?> scenarioClass) {
    this.scenarioName = scenarioClass.getName();
    this.tracer = OTEL.getTracer(scenarioName);
    this.meter = OTEL.getMeter(scenarioName);
    this.logger = new ClientLogger(scenarioName);
    this.runDuration = meter.histogramBuilder("test.run.duration")
        .setUnit("s")
        .build();
    this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
    this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
    this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
    this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
    this.errorCounter = meter.counterBuilder("test.run.errors").build();
}
/**
 * Re-initializes logging to OTel — necessary in Spring applications, which
 * reconfigure logging after this class is loaded. (Azure Monitor export and
 * JVM metrics are set up once in init(); the previously dangling Javadoc
 * describing that has been folded into this comment.)
 */
public void initLogging() {
    OpenTelemetryAppender.install(OTEL);
}
/**
 * Instruments a runnable: records its duration along with the outcome
 * (success, error, or cancellation) and wraps it in a span.
 * @param oneRun the runnable to instrument
 * @param method the method name
 * @param partitionId the partition id
 */
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // getMessage() may be null (e.g. a bare InterruptedException); check it
        // before contains() so an NPE can't mask the original failure.
        String message = e.getMessage();
        if ((message != null && message.contains("Timeout on blocking read"))
            || e instanceof InterruptedException || e instanceof TimeoutException) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        throw e;
    }
}
/**
 * Instruments a Mono: records its duration along with the status
 * (success, error, cancellation).
 * @param runAsync the mono to instrument
 * @param method the method name
 * @return the instrumented mono
 */
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
// Propagate the current OTel span to Azure SDK tracing via the Reactor context.
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
// Errors are already recorded above; swallow them so the pipeline completes.
.onErrorResume(e -> Mono.empty());
}
});
}
// Records a successful run's duration and closes the span.
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
// Records a cancelled run: duration tagged "cancelled", span marked as error.
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
// Records a failed run: unwraps reactive exceptions, marks the span failed,
// bumps the error counter, and records duration tagged with the error type.
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
// NOTE(review): getMessage() may be null here — confirm setStatus tolerates that.
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
* Records an event representing the start of a test along with test options.
* @param span the span to record attributes on
* @param options test parameters
*/
public void recordOptions(Span span, ScenarioOptions options) {
String libraryPackageVersion = "unknown";
try {
Class<?> libraryPackage = Class.forName(EventHubClientBuilder.class.getName());
libraryPackageVersion = libraryPackage.getPackage().getImplementationVersion();
if (libraryPackageVersion == null) {
libraryPackageVersion = "null";
}
} catch (ClassNotFoundException e) {
logger.atWarning()
.addKeyValue("class", EventHubClientBuilder.class.getName())
.log("Could not determine azure-eventhubs-messaging version, EventHubClientBuilder class is not found", e);
}
span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
/**
 * Common error-recording path: bumps the error counter and emits an error log,
 * both dimensioned by error type, method, and (when present) partition id.
 *
 * @param errorReason the error type
 * @param ex the underlying throwable; may be null
 * @param method the logical operation that failed
 * @param partitionId the partition id; may be null
 */
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
    AttributesBuilder metricAttributes = Attributes.builder();
    metricAttributes.put(AttributeKey.stringKey("error.type"), errorReason);
    metricAttributes.put(AttributeKey.stringKey("method"), method);
    if (partitionId != null) {
        metricAttributes.put(AttributeKey.stringKey("partitionId"), partitionId);
    }
    errorCounter.add(1, metricAttributes.build());
    LoggingEventBuilder logEvent = logger.atError();
    logEvent.addKeyValue("partitionId", partitionId);
    logEvent.addKeyValue("error.type", errorReason);
    logEvent.addKeyValue("method", method);
    if (ex == null) {
        logEvent.log("test error");
    } else {
        logEvent.log("test error", ex);
    }
}
/**
 * Counts a partition-closed event with partition id, close reason, and owning
 * processor as metric dimensions.
 *
 * @param closeContext close notification from the processor
 * @param processorId identifier of the processor instance
 */
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
    AttributesBuilder dimensions = Attributes.builder();
    dimensions.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId());
    dimensions.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString());
    dimensions.put(AttributeKey.stringKey("processor_id"), processorId);
    closedPartitionCounter.add(1, dimensions.build());
}
/**
 * Counts a partition-initialized event with partition id and owning processor
 * as metric dimensions.
 *
 * @param initializationContext initialization notification from the processor
 * @param processorId identifier of the processor instance
 */
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
    AttributesBuilder dimensions = Attributes.builder();
    dimensions.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId());
    dimensions.put(AttributeKey.stringKey("processor_id"), processorId);
    initializedPartitionCounter.add(1, dimensions.build());
}
/**
 * Elapsed wall-clock seconds since {@code start}, clamped at zero to guard
 * against clock adjustments producing negative durations.
 */
private static double getDuration(Instant start) {
    long elapsedMillis = Instant.now().toEpochMilli() - start.toEpochMilli();
    return (elapsedMillis > 0 ? elapsedMillis : 0L) / 1000d;
}
} |
for my own learning, is it a common pattern to use singular for counter name? for example, `partition_closed` instead of `partitions_closed` | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
this.errorCounter = meter.counterBuilder("test.run.errors").build();
} | this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build(); | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
this.errorCounter = meter.counterBuilder("test.run.errors").build();
} | class TelemetryHelper {
// Per-scenario tracer and logger; the OpenTelemetry SDK instance is shared process-wide.
private final Tracer tracer;
private final ClientLogger logger;
private static final OpenTelemetry OTEL;
// Attribute keys shared by all metric/span recordings.
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
// Marker attribute honored by the custom sampler in init() to force sampling of a span.
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
// Counters for processor partition lifecycle events and test errors.
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
// Histogram of individual test-run durations (unit "s" is set where the builder is configured).
private final DoubleHistogram runDuration;
// Pre-built attribute sets reused on the hot recording path.
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
static {
// Enable reactor scheduler metrics before the OTel SDK is created.
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
 * Initializes telemetry: installs the Azure Monitor exporter, a sampler customizer
 * that force-samples spans carrying {@code SAMPLE_IN_ATTRIBUTE}, JVM runtime metrics
 * observers, and the OTel log appender. Returns a no-op implementation when no
 * Application Insights connection string is configured.
 */
private static OpenTelemetry init() {
    String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
    if (applicationInsightsConnectionString == null) {
        return OpenTelemetry.noop();
    }
    AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
    // The early return above guarantees a non-null connection string here; the former
    // redundant "!= null" re-check around this install was dead code and is removed.
    new AzureMonitorExporterBuilder()
        .connectionString(applicationInsightsConnectionString)
        .install(sdkBuilder);
    String instanceId = System.getenv("CONTAINER_NAME");
    OpenTelemetry otel = sdkBuilder
        .addResourceCustomizer((resource, props) ->
            instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
        .addSamplerCustomizer((sampler, props) -> new Sampler() {
            @Override
            public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
                // Force-sample spans explicitly marked via startSampledInSpan().
                if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
                    return SamplingResult.recordAndSample();
                }
                return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
            }

            @Override
            public String getDescription() {
                return sampler.getDescription();
            }
        })
        .setResultAsGlobal()
        .build()
        .getOpenTelemetrySdk();
    // JVM runtime metrics: class loading, CPU, memory pools, threads, GC.
    Classes.registerObservers(otel);
    Cpu.registerObservers(otel);
    MemoryPools.registerObservers(otel);
    Threads.registerObservers(otel);
    GarbageCollector.registerObservers(otel);
    OpenTelemetryAppender.install(otel);
    return otel;
}
/**
* Re-initializes logging to otel - necessary in spring applications
*/
public void initLogging() {
// Re-install the appender so log records keep flowing to OTel after a framework
// (e.g. Spring) re-initializes the logging subsystem.
OpenTelemetryAppender.install(OTEL);
}
/**
 * Instruments a runnable: records its duration along with the outcome
 * (success, error, or cancellation) and wraps the execution in a span.
 *
 * @param oneRun the runnable to instrument
 * @param method the method name, used as the span name
 * @param partitionId the partition id; may be null
 */
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // Check the throwable type before touching the message: the previous code called
        // e.getMessage().contains(...) first, which threw NullPointerException for
        // throwables with a null message (e.g. a bare InterruptedException).
        String message = e.getMessage();
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (message != null && message.contains("Timeout on blocking read"))) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        throw e;
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @param method the method name
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
// Defer so the timer and span start at subscription time, not assembly time.
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
// Put the current OTel span into the reactor context so azure-core tracing can parent to it.
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
// Failures are already recorded above; swallow them so the caller's loop keeps running.
.onErrorResume(e -> Mono.empty());
}
});
}
// Records a successful run: duration histogram tagged with the scenario, then ends the span.
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
// Records a cancelled run: warns, records the duration with error.type=cancelled,
// marks the span as an error, and ends it.
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
// Records a failed run: attaches the unwrapped exception to the span, bumps the error
// counter and log via recordError, and records the duration tagged with the error type.
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
// Unwrap reactor composite/wrapped exceptions so the root cause class is reported.
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
 * Records an event representing the start of a test along with test options.
 *
 * @param span the span to record attributes on
 * @param options test parameters
 */
public void recordOptions(Span span, ScenarioOptions options) {
    // EventHubClientBuilder is already linked into this class, so its Package can be read
    // directly; the former Class.forName round-trip could never throw
    // ClassNotFoundException, making that catch block dead code.
    String libraryPackageVersion = "unknown";
    Package libraryPackage = EventHubClientBuilder.class.getPackage();
    if (libraryPackage != null) {
        // String.valueOf maps a missing Implementation-Version manifest entry to "null",
        // matching the previous behavior; a null Package no longer causes an NPE.
        libraryPackageVersion = String.valueOf(libraryPackage.getImplementationVersion());
    }
    span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
    span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
    span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
    span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
    span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
    span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
    // System.getenv(String) avoids materializing the whole environment map.
    span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv("HOSTNAME"));
    span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
    span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
/**
 * Starts a span that is always sampled in, bypassing the probabilistic sampler:
 * the sampler customizer installed in {@code init()} honors {@code SAMPLE_IN_ATTRIBUTE}.
 *
 * @param name the span name
 * @return the started span; the caller is responsible for ending it
 */
public Span startSampledInSpan(String name) {
    return tracer.spanBuilder(name).setAttribute(SAMPLE_IN_ATTRIBUTE, true).startSpan();
}
/**
 * Records an error that has no associated throwable.
 *
 * @param errorReason the error type recorded as the "error.type" dimension
 * @param method the logical operation that failed
 * @param partitionId the partition id; may be null
 */
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
/**
 * Records an error derived from a throwable; reactor-wrapped exceptions are
 * unwrapped first so the root cause class name is reported as the error type.
 *
 * @param ex the throwable to record
 * @param method the logical operation that failed
 * @param partitionId the partition id; may be null
 */
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
    Throwable rootCause = Exceptions.unwrap(ex);
    String reason = rootCause.getClass().getName();
    recordError(reason, rootCause, method, partitionId);
}
/**
 * Common error-recording path: bumps the error counter and emits an error log,
 * both dimensioned by error type, method, and (when present) partition id.
 *
 * @param errorReason the error type
 * @param ex the underlying throwable; may be null
 * @param method the logical operation that failed
 * @param partitionId the partition id; may be null
 */
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
    AttributesBuilder metricAttributes = Attributes.builder();
    metricAttributes.put(AttributeKey.stringKey("error.type"), errorReason);
    metricAttributes.put(AttributeKey.stringKey("method"), method);
    if (partitionId != null) {
        metricAttributes.put(AttributeKey.stringKey("partitionId"), partitionId);
    }
    errorCounter.add(1, metricAttributes.build());
    LoggingEventBuilder logEvent = logger.atError();
    logEvent.addKeyValue("partitionId", partitionId);
    logEvent.addKeyValue("error.type", errorReason);
    logEvent.addKeyValue("method", method);
    if (ex == null) {
        logEvent.log("test error");
    } else {
        logEvent.log("test error", ex);
    }
}
/**
 * Counts a partition-closed event with partition id, close reason, and owning
 * processor as metric dimensions.
 *
 * @param closeContext close notification from the processor
 * @param processorId identifier of the processor instance
 */
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
    AttributesBuilder dimensions = Attributes.builder();
    dimensions.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId());
    dimensions.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString());
    dimensions.put(AttributeKey.stringKey("processor_id"), processorId);
    closedPartitionCounter.add(1, dimensions.build());
}
/**
 * Counts a partition-initialized event with partition id and owning processor
 * as metric dimensions.
 *
 * @param initializationContext initialization notification from the processor
 * @param processorId identifier of the processor instance
 */
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
    AttributesBuilder dimensions = Attributes.builder();
    dimensions.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId());
    dimensions.put(AttributeKey.stringKey("processor_id"), processorId);
    initializedPartitionCounter.add(1, dimensions.build());
}
/**
 * Elapsed wall-clock seconds since {@code start}, clamped at zero to guard
 * against clock adjustments producing negative durations.
 */
private static double getDuration(Instant start) {
    long elapsedMillis = Instant.now().toEpochMilli() - start.toEpochMilli();
    return (elapsedMillis > 0 ? elapsedMillis : 0L) / 1000d;
}
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
* Initializes telemetry helper: sets up Azure Monitor exporter, enables JVM metrics collection.
*/
private static OpenTelemetry init() {
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
return OpenTelemetry.noop();
}
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
new AzureMonitorExporterBuilder()
.connectionString(applicationInsightsConnectionString)
.install(sdkBuilder);
String instanceId = System.getenv("CONTAINER_NAME");
OpenTelemetry otel = sdkBuilder
.addResourceCustomizer((resource, props) ->
instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
.addSamplerCustomizer((sampler, props) -> new Sampler() {
@Override
public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
return SamplingResult.recordAndSample();
}
return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
}
@Override
public String getDescription() {
return sampler.getDescription();
}
})
.setResultAsGlobal()
.build()
.getOpenTelemetrySdk();
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
}
/**
* Re-initializes logging to otel - necessary in spring applications
*/
public void initLogging() {
OpenTelemetryAppender.install(OTEL);
}
/**
 * Instruments a runnable: records its duration along with the outcome
 * (success, error, or cancellation) and wraps the execution in a span.
 *
 * @param oneRun the runnable to instrument
 * @param method the method name, used as the span name
 * @param partitionId the partition id; may be null
 */
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // Check the throwable type before touching the message: the previous code called
        // e.getMessage().contains(...) first, which threw NullPointerException for
        // throwables with a null message (e.g. a bare InterruptedException).
        String message = e.getMessage();
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (message != null && message.contains("Timeout on blocking read"))) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        throw e;
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @param method the method name
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
.onErrorResume(e -> Mono.empty());
}
});
}
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
 * Records an event representing the start of a test along with test options.
 *
 * @param span the span to record attributes on
 * @param options test parameters
 */
public void recordOptions(Span span, ScenarioOptions options) {
    // EventHubClientBuilder is already linked into this class, so its Package can be read
    // directly; the former Class.forName round-trip could never throw
    // ClassNotFoundException, making that catch block dead code.
    String libraryPackageVersion = "unknown";
    Package libraryPackage = EventHubClientBuilder.class.getPackage();
    if (libraryPackage != null) {
        // String.valueOf maps a missing Implementation-Version manifest entry to "null",
        // matching the previous behavior; a null Package no longer causes an NPE.
        libraryPackageVersion = String.valueOf(libraryPackage.getImplementationVersion());
    }
    span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
    span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
    span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
    span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
    span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
    span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
    // System.getenv(String) avoids materializing the whole environment map.
    span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv("HOSTNAME"));
    span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
    span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
AttributesBuilder attributesBuilder = Attributes.builder()
.put(AttributeKey.stringKey("error.type"), errorReason)
.put(AttributeKey.stringKey("method"), method);
if (partitionId != null) {
attributesBuilder.put(AttributeKey.stringKey("partitionId"), partitionId);
}
errorCounter.add(1, attributesBuilder.build());
LoggingEventBuilder log = logger.atError()
.addKeyValue("partitionId", partitionId)
.addKeyValue("error.type", errorReason)
.addKeyValue("method", method);
if (ex != null) {
log.log("test error", ex);
} else {
log.log("test error");
}
}
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
closedPartitionCounter.add(1, attributes);
}
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
initializedPartitionCounter.add(1, attributes);
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
} |
this name does not follow otel naming best practices in many ways :) I want to add those metrics to EH SDK and then will fix names. Here're naming best practices: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/general/metrics.md#pluralization | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
this.errorCounter = meter.counterBuilder("test.run.errors").build();
} | this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build(); | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
this.errorCounter = meter.counterBuilder("test.run.errors").build();
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
 * Initializes telemetry: installs the Azure Monitor exporter, a sampler customizer
 * that force-samples spans carrying {@code SAMPLE_IN_ATTRIBUTE}, JVM runtime metrics
 * observers, and the OTel log appender. Returns a no-op implementation when no
 * Application Insights connection string is configured.
 */
private static OpenTelemetry init() {
    String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
    if (applicationInsightsConnectionString == null) {
        return OpenTelemetry.noop();
    }
    AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
    // The early return above guarantees a non-null connection string here; the former
    // redundant "!= null" re-check around this install was dead code and is removed.
    new AzureMonitorExporterBuilder()
        .connectionString(applicationInsightsConnectionString)
        .install(sdkBuilder);
    String instanceId = System.getenv("CONTAINER_NAME");
    OpenTelemetry otel = sdkBuilder
        .addResourceCustomizer((resource, props) ->
            instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
        .addSamplerCustomizer((sampler, props) -> new Sampler() {
            @Override
            public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
                // Force-sample spans explicitly marked via startSampledInSpan().
                if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
                    return SamplingResult.recordAndSample();
                }
                return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
            }

            @Override
            public String getDescription() {
                return sampler.getDescription();
            }
        })
        .setResultAsGlobal()
        .build()
        .getOpenTelemetrySdk();
    // JVM runtime metrics: class loading, CPU, memory pools, threads, GC.
    Classes.registerObservers(otel);
    Cpu.registerObservers(otel);
    MemoryPools.registerObservers(otel);
    Threads.registerObservers(otel);
    GarbageCollector.registerObservers(otel);
    OpenTelemetryAppender.install(otel);
    return otel;
}
/**
* Re-initializes logging to otel - necessary in spring applications
*/
public void initLogging() {
OpenTelemetryAppender.install(OTEL);
}
/**
 * Instruments a runnable: records its duration along with the outcome
 * (success, error, or cancellation) and wraps the execution in a span.
 *
 * @param oneRun the runnable to instrument
 * @param method the method name, used as the span name
 * @param partitionId the partition id; may be null
 */
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // Check the throwable type before touching the message: the previous code called
        // e.getMessage().contains(...) first, which threw NullPointerException for
        // throwables with a null message (e.g. a bare InterruptedException).
        String message = e.getMessage();
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (message != null && message.contains("Timeout on blocking read"))) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        throw e;
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @param method the method name
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
.onErrorResume(e -> Mono.empty());
}
});
}
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
 * Records an event representing the start of a test along with test options.
 *
 * @param span the span to record attributes on
 * @param options test parameters
 */
public void recordOptions(Span span, ScenarioOptions options) {
    // EventHubClientBuilder is already linked into this class, so its Package can be read
    // directly; the former Class.forName round-trip could never throw
    // ClassNotFoundException, making that catch block dead code.
    String libraryPackageVersion = "unknown";
    Package libraryPackage = EventHubClientBuilder.class.getPackage();
    if (libraryPackage != null) {
        // String.valueOf maps a missing Implementation-Version manifest entry to "null",
        // matching the previous behavior; a null Package no longer causes an NPE.
        libraryPackageVersion = String.valueOf(libraryPackage.getImplementationVersion());
    }
    span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
    span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
    span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
    span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
    span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
    span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
    // System.getenv(String) avoids materializing the whole environment map.
    span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv("HOSTNAME"));
    span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
    span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
AttributesBuilder attributesBuilder = Attributes.builder()
.put(AttributeKey.stringKey("error.type"), errorReason)
.put(AttributeKey.stringKey("method"), method);
if (partitionId != null) {
attributesBuilder.put(AttributeKey.stringKey("partitionId"), partitionId);
}
errorCounter.add(1, attributesBuilder.build());
LoggingEventBuilder log = logger.atError()
.addKeyValue("partitionId", partitionId)
.addKeyValue("error.type", errorReason)
.addKeyValue("method", method);
if (ex != null) {
log.log("test error", ex);
} else {
log.log("test error");
}
}
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
closedPartitionCounter.add(1, attributes);
}
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
initializedPartitionCounter.add(1, attributes);
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
* Initializes telemetry helper: sets up Azure Monitor exporter, enables JVM metrics collection.
*/
private static OpenTelemetry init() {
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
return OpenTelemetry.noop();
}
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
new AzureMonitorExporterBuilder()
.connectionString(applicationInsightsConnectionString)
.install(sdkBuilder);
String instanceId = System.getenv("CONTAINER_NAME");
OpenTelemetry otel = sdkBuilder
.addResourceCustomizer((resource, props) ->
instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
.addSamplerCustomizer((sampler, props) -> new Sampler() {
@Override
public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
return SamplingResult.recordAndSample();
}
return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
}
@Override
public String getDescription() {
return sampler.getDescription();
}
})
.setResultAsGlobal()
.build()
.getOpenTelemetrySdk();
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
}
/**
* Re-initializes logging to otel - necessary in spring applications
*/
public void initLogging() {
// Re-install the appender so application log events flow to the shared OTel instance;
// required in Spring apps where logging is re-initialized after this class loads.
OpenTelemetryAppender.install(OTEL);
}
/**
* Instruments a runnable: records runnable duration along with the status (success, error, cancellation),
* @param oneRun the runnable to instrument
* @param method the method name
* @param partitionId the partition id
*/
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // e.getMessage() may be null (e.g. a bare InterruptedException); guard before
        // contains() so the original failure is not masked by a NullPointerException.
        String message = e.getMessage();
        if ((message != null && message.contains("Timeout on blocking read"))
            || e instanceof InterruptedException
            || e instanceof TimeoutException) {
            // Interruptions and blocking-read timeouts count as cancellations, not failures.
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        // Re-throw so the caller still observes the error after it is recorded.
        throw e;
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @param method the method name
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
// Defer so the span and start time are captured per subscription, not at assembly time.
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
// Record the outcome (failure, cancellation, success), then propagate the OTel span
// as the parent trace context so downstream Azure SDK calls attach to this span.
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
// Errors are already recorded above; swallow them so the pipeline completes normally.
.onErrorResume(e -> Mono.empty());
}
});
}
// Records a successful run: captures the elapsed duration under the common attributes
// and ends the span.
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
// Records a cancelled run: logs a warning, records the duration tagged with
// error.type=cancelled, and marks the span as an error before ending it.
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
// Records a failed run: attaches the unwrapped exception to the span, increments the
// error counter via recordError, and records the duration tagged with the error type.
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
// Unwrap reactive/composite wrappers so the root cause class is reported.
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
* Records an event representing the start of a test along with test options.
* @param span the span to record attributes on
* @param options test parameters
*/
public void recordOptions(Span span, ScenarioOptions options) {
String libraryPackageVersion = "unknown";
try {
// Resolve the messaging library version from the EventHubClientBuilder package metadata.
Class<?> libraryPackage = Class.forName(EventHubClientBuilder.class.getName());
libraryPackageVersion = libraryPackage.getPackage().getImplementationVersion();
if (libraryPackageVersion == null) {
// Implementation-Version manifest entry absent (e.g. running from classes, not a jar).
libraryPackageVersion = "null";
}
} catch (ClassNotFoundException e) {
logger.atWarning()
.addKeyValue("class", EventHubClientBuilder.class.getName())
.log("Could not determine azure-eventhubs-messaging version, EventHubClientBuilder class is not found", e);
}
span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
// NOTE(review): HOSTNAME may be unset outside containers, yielding a null attribute value — confirm intended.
span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
// Starts a span explicitly marked sample.in=true so the custom sampler installed in
// init() always records and samples it, regardless of the default sampling decision.
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
// Convenience overload for errors that have a reason string but no exception object.
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
// Records an exception as a test error, unwrapping reactive wrappers first and using
// the concrete exception class name as the error reason.
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
AttributesBuilder attributesBuilder = Attributes.builder()
.put(AttributeKey.stringKey("error.type"), errorReason)
.put(AttributeKey.stringKey("method"), method);
if (partitionId != null) {
attributesBuilder.put(AttributeKey.stringKey("partitionId"), partitionId);
}
errorCounter.add(1, attributesBuilder.build());
LoggingEventBuilder log = logger.atError()
.addKeyValue("partitionId", partitionId)
.addKeyValue("error.type", errorReason)
.addKeyValue("method", method);
if (ex != null) {
log.log("test error", ex);
} else {
log.log("test error");
}
}
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
closedPartitionCounter.add(1, attributes);
}
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
initializedPartitionCounter.add(1, attributes);
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
} |
Thank you for the reference | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
this.errorCounter = meter.counterBuilder("test.run.errors").build();
} | this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build(); | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.closedPartitionCounter = meter.counterBuilder("partition_closed").build();
this.initializedPartitionCounter = meter.counterBuilder("partition_initialized").build();
this.errorCounter = meter.counterBuilder("test.run.errors").build();
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
* Initializes telemetry helper: sets up Azure Monitor exporter, enables JVM metrics collection.
*/
/**
 * Initializes OpenTelemetry: installs the Azure Monitor exporter, tags telemetry with
 * the container instance id, honors the sample.in attribute via a custom sampler, and
 * registers JVM runtime metric observers.
 *
 * @return the configured SDK, or a no-op OpenTelemetry when no Application Insights
 *         connection string is present in the environment
 */
private static OpenTelemetry init() {
    String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
    if (applicationInsightsConnectionString == null) {
        return OpenTelemetry.noop();
    }
    AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
    // The early return above guarantees a non-null connection string here, so the
    // previously redundant null re-check has been removed.
    new AzureMonitorExporterBuilder()
        .connectionString(applicationInsightsConnectionString)
        .install(sdkBuilder);
    String instanceId = System.getenv("CONTAINER_NAME");
    OpenTelemetry otel = sdkBuilder
        .addResourceCustomizer((resource, props) ->
            instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
        .addSamplerCustomizer((sampler, props) -> new Sampler() {
            @Override
            public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
                // Force sampling for spans explicitly marked sample.in=true;
                // otherwise defer to the configured sampler.
                if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
                    return SamplingResult.recordAndSample();
                }
                return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
            }
            @Override
            public String getDescription() {
                return sampler.getDescription();
            }
        })
        .setResultAsGlobal()
        .build()
        .getOpenTelemetrySdk();
    // JVM runtime metrics: class loading, CPU, memory pools, threads, GC.
    Classes.registerObservers(otel);
    Cpu.registerObservers(otel);
    MemoryPools.registerObservers(otel);
    Threads.registerObservers(otel);
    GarbageCollector.registerObservers(otel);
    OpenTelemetryAppender.install(otel);
    return otel;
}
/**
* Re-initializes logging to otel - necessary in spring applications
*/
public void initLogging() {
OpenTelemetryAppender.install(OTEL);
}
/**
* Instruments a runnable: records runnable duration along with the status (success, error, cancellation),
* @param oneRun the runnable to instrument
* @param method the method name
* @param partitionId the partition id
*/
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // e.getMessage() may be null (e.g. a bare InterruptedException); guard before
        // contains() so the original failure is not masked by a NullPointerException.
        String message = e.getMessage();
        if ((message != null && message.contains("Timeout on blocking read"))
            || e instanceof InterruptedException
            || e instanceof TimeoutException) {
            // Interruptions and blocking-read timeouts count as cancellations, not failures.
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        // Re-throw so the caller still observes the error after it is recorded.
        throw e;
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @param method the method name
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
.onErrorResume(e -> Mono.empty());
}
});
}
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
* Records an event representing the start of a test along with test options.
* @param span the span to record attributes on
* @param options test parameters
*/
public void recordOptions(Span span, ScenarioOptions options) {
String libraryPackageVersion = "unknown";
try {
Class<?> libraryPackage = Class.forName(EventHubClientBuilder.class.getName());
libraryPackageVersion = libraryPackage.getPackage().getImplementationVersion();
if (libraryPackageVersion == null) {
libraryPackageVersion = "null";
}
} catch (ClassNotFoundException e) {
logger.atWarning()
.addKeyValue("class", EventHubClientBuilder.class.getName())
.log("Could not determine azure-eventhubs-messaging version, EventHubClientBuilder class is not found", e);
}
span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
AttributesBuilder attributesBuilder = Attributes.builder()
.put(AttributeKey.stringKey("error.type"), errorReason)
.put(AttributeKey.stringKey("method"), method);
if (partitionId != null) {
attributesBuilder.put(AttributeKey.stringKey("partitionId"), partitionId);
}
errorCounter.add(1, attributesBuilder.build());
LoggingEventBuilder log = logger.atError()
.addKeyValue("partitionId", partitionId)
.addKeyValue("error.type", errorReason)
.addKeyValue("method", method);
if (ex != null) {
log.log("test error", ex);
} else {
log.log("test error");
}
}
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
closedPartitionCounter.add(1, attributes);
}
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
initializedPartitionCounter.add(1, attributes);
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final OpenTelemetry OTEL;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private final String scenarioName;
private final Meter meter;
private final LongCounter closedPartitionCounter;
private final LongCounter initializedPartitionCounter;
private final LongCounter errorCounter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
* Initializes telemetry helper: sets up Azure Monitor exporter, enables JVM metrics collection.
*/
private static OpenTelemetry init() {
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
return OpenTelemetry.noop();
}
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
new AzureMonitorExporterBuilder()
.connectionString(applicationInsightsConnectionString)
.install(sdkBuilder);
String instanceId = System.getenv("CONTAINER_NAME");
OpenTelemetry otel = sdkBuilder
.addResourceCustomizer((resource, props) ->
instanceId == null ? resource : resource.toBuilder().put(AttributeKey.stringKey("service.instance.id"), instanceId).build())
.addSamplerCustomizer((sampler, props) -> new Sampler() {
@Override
public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
return SamplingResult.recordAndSample();
}
return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
}
@Override
public String getDescription() {
return sampler.getDescription();
}
})
.setResultAsGlobal()
.build()
.getOpenTelemetrySdk();
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
}
/**
* Re-initializes logging to otel - necessary in spring applications
*/
public void initLogging() {
OpenTelemetryAppender.install(OTEL);
}
/**
* Instruments a runnable: records runnable duration along with the status (success, error, cancellation),
* @param oneRun the runnable to instrument
* @param method the method name
* @param partitionId the partition id
*/
@SuppressWarnings("try")
public void instrumentProcess(Runnable oneRun, String method, String partitionId) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder(method).startSpan();
    try (Scope s = span.makeCurrent()) {
        oneRun.run();
        trackSuccess(start, span);
    } catch (Throwable e) {
        // e.getMessage() may be null (e.g. a bare InterruptedException); guard before
        // contains() so the original failure is not masked by a NullPointerException.
        String message = e.getMessage();
        if ((message != null && message.contains("Timeout on blocking read"))
            || e instanceof InterruptedException
            || e instanceof TimeoutException) {
            // Interruptions and blocking-read timeouts count as cancellations, not failures.
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span, method, partitionId);
        }
        // Re-throw so the caller still observes the error after it is recorded.
        throw e;
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @param method the method name
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Mono<Void> runAsync, String method) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder(method).startSpan();
try (Scope s = span.makeCurrent()) {
return runAsync.doOnError(e -> trackFailure(start, e, span, method, null))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, Context.current()))
.onErrorResume(e -> Mono.empty());
}
});
}
private void trackSuccess(Instant start, Span span) {
runDuration.record(getDuration(start), commonAttributes);
span.end();
}
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
private void trackFailure(Instant start, Throwable e, Span span, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
recordError(errorType, unwrapped, method, partitionId);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
span.end();
}
/**
* Records an event representing the start of a test along with test options.
* @param span the span to record attributes on
* @param options test parameters
*/
public void recordOptions(Span span, ScenarioOptions options) {
String libraryPackageVersion = "unknown";
try {
Class<?> libraryPackage = Class.forName(EventHubClientBuilder.class.getName());
libraryPackageVersion = libraryPackage.getPackage().getImplementationVersion();
if (libraryPackageVersion == null) {
libraryPackageVersion = "null";
}
} catch (ClassNotFoundException e) {
logger.atWarning()
.addKeyValue("class", EventHubClientBuilder.class.getName())
.log("Could not determine azure-eventhubs-messaging version, EventHubClientBuilder class is not found", e);
}
span.setAttribute(AttributeKey.longKey("durationSec"), options.getTestDuration().getSeconds());
span.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
span.setAttribute(AttributeKey.stringKey("packageVersion"), libraryPackageVersion);
span.setAttribute(AttributeKey.stringKey("eventHubName"), options.getEventHubsEventHubName());
span.setAttribute(AttributeKey.stringKey("consumerGroupName"), options.getEventHubsConsumerGroup());
span.setAttribute(AttributeKey.longKey("messageSize"), options.getMessageSize());
span.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
span.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
span.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
}
public Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
public void recordError(String errorReason, String method, String partitionId) {
recordError(errorReason, null, method, partitionId);
}
public <T extends Throwable> void recordError(T ex, String method, String partitionId) {
Throwable unwrapped = Exceptions.unwrap(ex);
recordError(unwrapped.getClass().getName(), unwrapped, method, partitionId);
}
private void recordError(String errorReason, Throwable ex, String method, String partitionId) {
AttributesBuilder attributesBuilder = Attributes.builder()
.put(AttributeKey.stringKey("error.type"), errorReason)
.put(AttributeKey.stringKey("method"), method);
if (partitionId != null) {
attributesBuilder.put(AttributeKey.stringKey("partitionId"), partitionId);
}
errorCounter.add(1, attributesBuilder.build());
LoggingEventBuilder log = logger.atError()
.addKeyValue("partitionId", partitionId)
.addKeyValue("error.type", errorReason)
.addKeyValue("method", method);
if (ex != null) {
log.log("test error", ex);
} else {
log.log("test error");
}
}
public void recordPartitionClosedEvent(CloseContext closeContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), closeContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("closed_reason"), closeContext.getCloseReason().toString())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
closedPartitionCounter.add(1, attributes);
}
public void recordPartitionInitializedEvent(InitializationContext initializationContext, String processorId) {
Attributes attributes = Attributes.builder()
.put(AttributeKey.stringKey("partition_id"), initializationContext.getPartitionContext().getPartitionId())
.put(AttributeKey.stringKey("processor_id"), processorId)
.build();
initializedPartitionCounter.add(1, attributes);
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.