comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Do we need this short timeout? We already have a TIMEOUT constant defined that can be reused. You can bump the seconds up if you need. https://github.com/Azure/azure-sdk-for-java/blob/0902c492de42ed25164e22fc55ec388041ca12df/sdk/servicebus/azure-messaging-servicebus/src/test/java/com/azure/messaging/servicebus/ServiceBusSessionManagerTest.java#L69
/**
 * Verifies that the receiver link for a single, unnamed session is cleaned up
 * after the session idle timeout elapses.
 */
void singleUnnamedSessionCleanupAfterTimeout() {
    // Reuse the shared TIMEOUT constant instead of a one-off local duration
    // (review feedback). The StepVerifier deadline is padded past the in-test
    // sleep (TIMEOUT seconds) so the sleep alone cannot trip the verification.
    ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1,
        MAX_LOCK_RENEWAL, false, null, 2);
    sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor,
        tracerProvider, messageSerializer, receiverOptions);

    final String sessionId = "session-1";
    final String lockToken = "a-lock-token";
    final String linkName = "my-link-name";
    final OffsetDateTime sessionLockedUntil = OffsetDateTime.now().plus(Duration.ofSeconds(30));
    final Message message = mock(Message.class);
    final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class);

    // Deserialization of the raw AMQP message yields our mocked received message.
    when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
    when(receivedMessage.getSessionId()).thenReturn(sessionId);
    when(receivedMessage.getLockToken()).thenReturn(lockToken);

    when(amqpReceiveLink.getLinkName()).thenReturn(linkName);
    when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId));
    when(amqpReceiveLink.getSessionLockedUntil())
        .thenAnswer(invocation -> Mono.just(sessionLockedUntil));

    when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class),
        isNull(), any(MessagingEntityType.class), isNull())).thenReturn(Mono.just(amqpReceiveLink));

    StepVerifier.create(sessionManager.receive())
        .then(() -> messageSink.next(message))
        .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context))
        .then(() -> {
            try {
                // The link exists while the session is live, and is removed once
                // the idle timeout elapses.
                assertNotNull(sessionManager.getLinkName(sessionId));
                TimeUnit.SECONDS.sleep(TIMEOUT.getSeconds());
                assertNull(sessionManager.getLinkName(sessionId));
            } catch (InterruptedException e) {
                // Never swallow interruption: restore the flag and fail loudly.
                Thread.currentThread().interrupt();
                throw new AssertionError("Interrupted while waiting for session cleanup.", e);
            }
        })
        .thenCancel()
        .verify(TIMEOUT.plusSeconds(5));
}
Duration shortTimeout = Duration.ofSeconds(15);
/**
 * Verifies that the receiver link for a single, unnamed session is cleaned up
 * after the session idle timeout elapses.
 */
void singleUnnamedSessionCleanupAfterTimeout() {
    // The body sleeps for TIMEOUT seconds before asserting cleanup, so verifying
    // with exactly TIMEOUT would make the deadline equal the mandatory sleep and
    // the test inherently flaky. Pad the StepVerifier deadline past the sleep.
    ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1,
        MAX_LOCK_RENEWAL, false, null, 2);
    sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor,
        tracerProvider, messageSerializer, receiverOptions);

    final String sessionId = "session-1";
    final String lockToken = "a-lock-token";
    final String linkName = "my-link-name";
    final OffsetDateTime sessionLockedUntil = OffsetDateTime.now().plus(Duration.ofSeconds(30));
    final Message message = mock(Message.class);
    final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class);

    // Deserialization of the raw AMQP message yields our mocked received message.
    when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage);
    when(receivedMessage.getSessionId()).thenReturn(sessionId);
    when(receivedMessage.getLockToken()).thenReturn(lockToken);

    when(amqpReceiveLink.getLinkName()).thenReturn(linkName);
    when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId));
    when(amqpReceiveLink.getSessionLockedUntil())
        .thenAnswer(invocation -> Mono.just(sessionLockedUntil));

    when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class),
        isNull(), any(MessagingEntityType.class), isNull())).thenReturn(Mono.just(amqpReceiveLink));

    StepVerifier.create(sessionManager.receive())
        .then(() -> messageSink.next(message))
        .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context))
        .then(() -> {
            try {
                // The link exists while the session is live, and is removed once
                // the idle timeout elapses.
                assertNotNull(sessionManager.getLinkName(sessionId));
                TimeUnit.SECONDS.sleep(TIMEOUT.getSeconds());
                assertNull(sessionManager.getLinkName(sessionId));
            } catch (InterruptedException e) {
                // Never swallow interruption: restore the flag and fail loudly.
                Thread.currentThread().interrupt();
                throw new AssertionError("Interrupted while waiting for session cleanup.", e);
            }
        })
        .thenCancel()
        .verify(TIMEOUT.plusSeconds(5));
}
class ServiceBusSessionManagerTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final Duration TIMEOUT = Duration.ofSeconds(10); private static final Duration MAX_LOCK_RENEWAL = Duration.ofSeconds(5); private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final EmitterProcessor<Message> messageProcessor = EmitterProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusSessionManager sessionManager; @Mock private ServiceBusReceiveLink amqpReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private ServiceBusManagementNode managementNode; @Captor private ArgumentCaptor<String> linkNameCaptor; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(60)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void beforeEach(TestInfo testInfo) { logger.info("===== [{}] Setting up. 
=====", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getHostname()).thenReturn(NAMESPACE); when(amqpReceiveLink.getEntityPath()).thenReturn(ENTITY_PATH); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions().setTryTimeout(TIMEOUT), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); } @AfterEach void afterEach(TestInfo testInfo) { logger.info("===== [{}] Tearing down. =====", testInfo.getDisplayName()); if (sessionManager != null) { sessionManager.close(); } if (connectionProcessor != null) { connectionProcessor.dispose(); } Mockito.framework().clearInlineMocks(); } @Test void receiveNull() { ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, MAX_LOCK_RENEWAL, false, null, 5); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); StepVerifier.create(sessionManager.receive()) .expectError(NullPointerException.class) .verify(); } /** * Verify that when we receive for a single, unnamed session, when no more items are emitted, it completes. 
*/ @Test void singleUnnamedSession() { ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, MAX_LOCK_RENEWAL, false, null, 5); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); final String sessionId = "session-1"; final String lockToken = "a-lock-token"; final String linkName = "my-link-name"; final OffsetDateTime sessionLockedUntil = OffsetDateTime.now().plus(Duration.ofSeconds(30)); final Message message = mock(Message.class); final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getSessionId()).thenReturn(sessionId); when(receivedMessage.getLockToken()).thenReturn(lockToken); final int numberOfMessages = 5; when(amqpReceiveLink.getLinkName()).thenReturn(linkName); when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId)); when(amqpReceiveLink.getSessionLockedUntil()) .thenAnswer(invocation -> Mono.just(sessionLockedUntil)); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull())).thenReturn(Mono.just(amqpReceiveLink)); when(managementNode.renewSessionLock(sessionId, linkName)).thenReturn( Mono.fromCallable(() -> OffsetDateTime.now().plus(Duration.ofSeconds(5)))); StepVerifier.create(sessionManager.receive()) .then(() -> { for (int i = 0; i < numberOfMessages; i++) { messageSink.next(message); } }) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> 
assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .thenCancel() .verify(Duration.ofSeconds(45)); } /** * Verify that when we receive multiple sessions, it'll change to the next session when one is complete. */ @Test void multipleSessions() { final ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, MAX_LOCK_RENEWAL, true, null, 5); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); final int numberOfMessages = 5; final Callable<OffsetDateTime> onRenewal = () -> OffsetDateTime.now().plus(Duration.ofSeconds(5)); final String sessionId = "session-1"; final String lockToken = "a-lock-token"; final String linkName = "my-link-name"; final Message message = mock(Message.class); final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getSessionId()).thenReturn(sessionId); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(amqpReceiveLink.getLinkName()).thenReturn(linkName); when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId)); when(amqpReceiveLink.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); final ServiceBusReceiveLink amqpReceiveLink2 = mock(ServiceBusReceiveLink.class); final Message message2 = mock(Message.class); final ServiceBusReceivedMessage receivedMessage2 = mock(ServiceBusReceivedMessage.class); final String sessionId2 = "session-2"; final String lockToken2 = "a-lock-token-2"; final String linkName2 = "my-link-name-2"; final TestPublisher<Message> messagePublisher2 = TestPublisher.create(); final Flux<Message> messageFlux2 = messagePublisher2.flux(); when(receivedMessage2.getSessionId()).thenReturn(sessionId2); 
when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(amqpReceiveLink2.receive()).thenReturn(messageFlux2); when(amqpReceiveLink2.getHostname()).thenReturn(NAMESPACE); when(amqpReceiveLink2.getEntityPath()).thenReturn(ENTITY_PATH); when(amqpReceiveLink2.getEndpointStates()).thenReturn(endpointProcessor); when(amqpReceiveLink2.getLinkName()).thenReturn(linkName2); when(amqpReceiveLink2.getSessionId()).thenReturn(Mono.just(sessionId2)); when(amqpReceiveLink2.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); when(amqpReceiveLink2.updateDisposition(lockToken2, Accepted.getInstance())).thenReturn(Mono.empty()); final AtomicInteger count = new AtomicInteger(); when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull())).thenAnswer(invocation -> { final int number = count.getAndIncrement(); switch (number) { case 0: return Mono.just(amqpReceiveLink); case 1: return Mono.just(amqpReceiveLink2); default: return Mono.empty(); } }); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(managementNode.renewSessionLock(sessionId, linkName)).thenReturn(Mono.fromCallable(onRenewal)); when(managementNode.renewSessionLock(sessionId2, linkName2)).thenReturn(Mono.fromCallable(onRenewal)); StepVerifier.create(sessionManager.receive()) .then(() -> { for (int i = 0; i < numberOfMessages; i++) { messageSink.next(message); } }) .assertNext(context -> { System.out.println("1"); assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("2"); assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("3"); assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("4"); 
assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("5"); assertMessageEquals(sessionId, receivedMessage, context); }) .thenAwait(Duration.ofSeconds(13)) .then(() -> { for (int i = 0; i < 3; i++) { messagePublisher2.next(message2); } }) .assertNext(context -> { System.out.println("6"); assertMessageEquals(sessionId2, receivedMessage2, context); }) .assertNext(context -> { System.out.println("7"); assertMessageEquals(sessionId2, receivedMessage2, context); }) .assertNext(context -> { System.out.println("8"); assertMessageEquals(sessionId2, receivedMessage2, context); }) .thenAwait(Duration.ofSeconds(15)) .thenCancel() .verify(); } /** * Verify that when we can call multiple receive, it'll create a new link. */ @Test void multipleReceiveUnnamedSession() { final int expectedLinksCreated = 2; final Callable<OffsetDateTime> onRenewal = () -> OffsetDateTime.now().plus(Duration.ofSeconds(5)); final ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, Duration.ZERO, false, null, 1); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); final String sessionId = "session-1"; final String linkName = "my-link-name"; when(amqpReceiveLink.getLinkName()).thenReturn(linkName); when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId)); when(amqpReceiveLink.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); final ServiceBusReceiveLink amqpReceiveLink2 = mock(ServiceBusReceiveLink.class); final String sessionId2 = "session-2"; final String linkName2 = "my-link-name-2"; final TestPublisher<Message> messagePublisher2 = TestPublisher.create(); final Flux<Message> messageFlux2 = messagePublisher2.flux(); when(amqpReceiveLink2.receive()).thenReturn(messageFlux2); when(amqpReceiveLink2.getHostname()).thenReturn(NAMESPACE); 
when(amqpReceiveLink2.getEntityPath()).thenReturn(ENTITY_PATH); when(amqpReceiveLink2.getEndpointStates()).thenReturn(endpointProcessor); when(amqpReceiveLink2.getLinkName()).thenReturn(linkName2); when(amqpReceiveLink2.getSessionId()).thenReturn(Mono.just(sessionId2)); when(amqpReceiveLink2.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); final AtomicInteger count = new AtomicInteger(); when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull())).thenAnswer(invocation -> { final int number = count.getAndIncrement(); switch (number) { case 0: return Mono.just(amqpReceiveLink); case 1: return Mono.just(amqpReceiveLink2); default: return Mono.empty(); } }); StepVerifier.create(sessionManager.receive()) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); StepVerifier.create(sessionManager.receive()) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); verify(connection, times(2)).createReceiveLink(linkNameCaptor.capture(), eq(ENTITY_PATH), any( ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull()); final List<String> actualLinksCreated = linkNameCaptor.getAllValues(); assertNotNull(actualLinksCreated); assertEquals(expectedLinksCreated, actualLinksCreated.size()); assertFalse(actualLinksCreated.get(0).equalsIgnoreCase(actualLinksCreated.get(1))); } /** * Validate that session-id specific session receiver is removed after {@link AmqpRetryOptions */ @Test private static void assertMessageEquals(String sessionId, ServiceBusReceivedMessage expected, ServiceBusMessageContext actual) { assertEquals(sessionId, actual.getSessionId()); assertNull(actual.getThrowable()); assertEquals(expected, actual.getMessage()); } }
class ServiceBusSessionManagerTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final Duration TIMEOUT = Duration.ofSeconds(10); private static final Duration MAX_LOCK_RENEWAL = Duration.ofSeconds(5); private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final EmitterProcessor<Message> messageProcessor = EmitterProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusSessionManager sessionManager; @Mock private ServiceBusReceiveLink amqpReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private ServiceBusManagementNode managementNode; @Captor private ArgumentCaptor<String> linkNameCaptor; @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(60)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void beforeEach(TestInfo testInfo) { logger.info("===== [{}] Setting up. 
=====", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getHostname()).thenReturn(NAMESPACE); when(amqpReceiveLink.getEntityPath()).thenReturn(ENTITY_PATH); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions().setTryTimeout(TIMEOUT), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); } @AfterEach void afterEach(TestInfo testInfo) { logger.info("===== [{}] Tearing down. =====", testInfo.getDisplayName()); if (sessionManager != null) { sessionManager.close(); } if (connectionProcessor != null) { connectionProcessor.dispose(); } Mockito.framework().clearInlineMocks(); } @Test void receiveNull() { ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, MAX_LOCK_RENEWAL, false, null, 5); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); StepVerifier.create(sessionManager.receive()) .expectError(NullPointerException.class) .verify(); } /** * Verify that when we receive for a single, unnamed session, when no more items are emitted, it completes. 
*/ @Test void singleUnnamedSession() { ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, MAX_LOCK_RENEWAL, false, null, 5); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); final String sessionId = "session-1"; final String lockToken = "a-lock-token"; final String linkName = "my-link-name"; final OffsetDateTime sessionLockedUntil = OffsetDateTime.now().plus(Duration.ofSeconds(30)); final Message message = mock(Message.class); final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getSessionId()).thenReturn(sessionId); when(receivedMessage.getLockToken()).thenReturn(lockToken); final int numberOfMessages = 5; when(amqpReceiveLink.getLinkName()).thenReturn(linkName); when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId)); when(amqpReceiveLink.getSessionLockedUntil()) .thenAnswer(invocation -> Mono.just(sessionLockedUntil)); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull())).thenReturn(Mono.just(amqpReceiveLink)); when(managementNode.renewSessionLock(sessionId, linkName)).thenReturn( Mono.fromCallable(() -> OffsetDateTime.now().plus(Duration.ofSeconds(5)))); StepVerifier.create(sessionManager.receive()) .then(() -> { for (int i = 0; i < numberOfMessages; i++) { messageSink.next(message); } }) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> 
assertMessageEquals(sessionId, receivedMessage, context)) .assertNext(context -> assertMessageEquals(sessionId, receivedMessage, context)) .thenCancel() .verify(Duration.ofSeconds(45)); } /** * Verify that when we receive multiple sessions, it'll change to the next session when one is complete. */ @Test void multipleSessions() { final ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, MAX_LOCK_RENEWAL, true, null, 5); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); final int numberOfMessages = 5; final Callable<OffsetDateTime> onRenewal = () -> OffsetDateTime.now().plus(Duration.ofSeconds(5)); final String sessionId = "session-1"; final String lockToken = "a-lock-token"; final String linkName = "my-link-name"; final Message message = mock(Message.class); final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getSessionId()).thenReturn(sessionId); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(amqpReceiveLink.getLinkName()).thenReturn(linkName); when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId)); when(amqpReceiveLink.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); final ServiceBusReceiveLink amqpReceiveLink2 = mock(ServiceBusReceiveLink.class); final Message message2 = mock(Message.class); final ServiceBusReceivedMessage receivedMessage2 = mock(ServiceBusReceivedMessage.class); final String sessionId2 = "session-2"; final String lockToken2 = "a-lock-token-2"; final String linkName2 = "my-link-name-2"; final TestPublisher<Message> messagePublisher2 = TestPublisher.create(); final Flux<Message> messageFlux2 = messagePublisher2.flux(); when(receivedMessage2.getSessionId()).thenReturn(sessionId2); 
when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(amqpReceiveLink2.receive()).thenReturn(messageFlux2); when(amqpReceiveLink2.getHostname()).thenReturn(NAMESPACE); when(amqpReceiveLink2.getEntityPath()).thenReturn(ENTITY_PATH); when(amqpReceiveLink2.getEndpointStates()).thenReturn(endpointProcessor); when(amqpReceiveLink2.getLinkName()).thenReturn(linkName2); when(amqpReceiveLink2.getSessionId()).thenReturn(Mono.just(sessionId2)); when(amqpReceiveLink2.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); when(amqpReceiveLink2.updateDisposition(lockToken2, Accepted.getInstance())).thenReturn(Mono.empty()); final AtomicInteger count = new AtomicInteger(); when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull())).thenAnswer(invocation -> { final int number = count.getAndIncrement(); switch (number) { case 0: return Mono.just(amqpReceiveLink); case 1: return Mono.just(amqpReceiveLink2); default: return Mono.empty(); } }); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(managementNode.renewSessionLock(sessionId, linkName)).thenReturn(Mono.fromCallable(onRenewal)); when(managementNode.renewSessionLock(sessionId2, linkName2)).thenReturn(Mono.fromCallable(onRenewal)); StepVerifier.create(sessionManager.receive()) .then(() -> { for (int i = 0; i < numberOfMessages; i++) { messageSink.next(message); } }) .assertNext(context -> { System.out.println("1"); assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("2"); assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("3"); assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("4"); 
assertMessageEquals(sessionId, receivedMessage, context); }) .assertNext(context -> { System.out.println("5"); assertMessageEquals(sessionId, receivedMessage, context); }) .thenAwait(Duration.ofSeconds(13)) .then(() -> { for (int i = 0; i < 3; i++) { messagePublisher2.next(message2); } }) .assertNext(context -> { System.out.println("6"); assertMessageEquals(sessionId2, receivedMessage2, context); }) .assertNext(context -> { System.out.println("7"); assertMessageEquals(sessionId2, receivedMessage2, context); }) .assertNext(context -> { System.out.println("8"); assertMessageEquals(sessionId2, receivedMessage2, context); }) .thenAwait(Duration.ofSeconds(15)) .thenCancel() .verify(); } /** * Verify that when we can call multiple receive, it'll create a new link. */ @Test void multipleReceiveUnnamedSession() { final int expectedLinksCreated = 2; final Callable<OffsetDateTime> onRenewal = () -> OffsetDateTime.now().plus(Duration.ofSeconds(5)); final ReceiverOptions receiverOptions = new ReceiverOptions(ServiceBusReceiveMode.PEEK_LOCK, 1, Duration.ZERO, false, null, 1); sessionManager = new ServiceBusSessionManager(ENTITY_PATH, ENTITY_TYPE, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); final String sessionId = "session-1"; final String linkName = "my-link-name"; when(amqpReceiveLink.getLinkName()).thenReturn(linkName); when(amqpReceiveLink.getSessionId()).thenReturn(Mono.just(sessionId)); when(amqpReceiveLink.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); final ServiceBusReceiveLink amqpReceiveLink2 = mock(ServiceBusReceiveLink.class); final String sessionId2 = "session-2"; final String linkName2 = "my-link-name-2"; final TestPublisher<Message> messagePublisher2 = TestPublisher.create(); final Flux<Message> messageFlux2 = messagePublisher2.flux(); when(amqpReceiveLink2.receive()).thenReturn(messageFlux2); when(amqpReceiveLink2.getHostname()).thenReturn(NAMESPACE); 
when(amqpReceiveLink2.getEntityPath()).thenReturn(ENTITY_PATH); when(amqpReceiveLink2.getEndpointStates()).thenReturn(endpointProcessor); when(amqpReceiveLink2.getLinkName()).thenReturn(linkName2); when(amqpReceiveLink2.getSessionId()).thenReturn(Mono.just(sessionId2)); when(amqpReceiveLink2.getSessionLockedUntil()).thenReturn(Mono.fromCallable(onRenewal)); final AtomicInteger count = new AtomicInteger(); when(connection.createReceiveLink(anyString(), eq(ENTITY_PATH), any(ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull())).thenAnswer(invocation -> { final int number = count.getAndIncrement(); switch (number) { case 0: return Mono.just(amqpReceiveLink); case 1: return Mono.just(amqpReceiveLink2); default: return Mono.empty(); } }); StepVerifier.create(sessionManager.receive()) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); StepVerifier.create(sessionManager.receive()) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); verify(connection, times(2)).createReceiveLink(linkNameCaptor.capture(), eq(ENTITY_PATH), any( ServiceBusReceiveMode.class), isNull(), any(MessagingEntityType.class), isNull()); final List<String> actualLinksCreated = linkNameCaptor.getAllValues(); assertNotNull(actualLinksCreated); assertEquals(expectedLinksCreated, actualLinksCreated.size()); assertFalse(actualLinksCreated.get(0).equalsIgnoreCase(actualLinksCreated.get(1))); } /** * Validate that session-id specific session receiver is removed after {@link AmqpRetryOptions */ @Test private static void assertMessageEquals(String sessionId, ServiceBusReceivedMessage expected, ServiceBusMessageContext actual) { assertEquals(sessionId, actual.getSessionId()); assertNull(actual.getThrowable()); assertEquals(expected, actual.getMessage()); } }
Is it worth abstracting this out as a contract/API and leveraging it in related places for consistency?
/**
 * Removes the given endpoint from the endpoint map, keyed by the authority of
 * its server key, and records the eviction.
 *
 * @param endpoint the endpoint to evict; must not be {@code null}.
 */
private void evict(final RntbdEndpoint endpoint) {
    final String authority = endpoint.serverKey().getAuthority();
    final RntbdEndpoint removed = this.endpoints.remove(authority);
    // Only count an eviction when the endpoint was actually present in the map.
    if (removed != null) {
        this.evictions.incrementAndGet();
    }
}
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
/**
 * Evicts an endpoint from this provider's endpoint map and bumps the eviction
 * counter when the removal actually took effect.
 *
 * @param endpoint the endpoint to evict; must not be {@code null}.
 */
private void evict(final RntbdEndpoint endpoint) {
    // The map is keyed by server authority; a non-null return means the entry
    // existed and was removed, so the eviction is counted exactly once.
    final boolean wasPresent =
        this.endpoints.remove(endpoint.serverKey().getAuthority()) != null;
    if (wasPresent) {
        this.evictions.incrementAndGet();
    }
}
// Caches RntbdEndpoint instances keyed by server authority (get() creates them lazily via
// ConcurrentHashMap.computeIfAbsent) and owns the shared NioEventLoopGroup and request timer.
// close() is idempotent (AtomicBoolean guard): it stops monitoring, closes all endpoints, then
// shuts the event loop group down gracefully and closes the request timer from the shutdown
// listener. NOTE(review): evictions counter is exposed via evictions(); incremented elsewhere.
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
// Caches RntbdEndpoint instances keyed by server authority (get() creates them lazily via
// ConcurrentHashMap.computeIfAbsent) and owns the shared NioEventLoopGroup and request timer.
// close() is idempotent (AtomicBoolean guard): it stops monitoring, closes all endpoints, then
// shuts the event loop group down gracefully and closes the request timer from the shutdown
// listener. NOTE(review): evictions counter is exposed via evictions(); incremented elsewhere.
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
This log message should instead be `"Closing session receiver for session id {}"`. "Adding scheduler back to pool" is not a very useful debug message for the customer or for us to troubleshoot.
/**
 * Creates (or reuses) the {@link ServiceBusSessionReceiver} for the next available session and
 * publishes its messages on the given scheduler. When the session's message stream terminates,
 * the scheduler is returned to the pool, the receiver is removed from the cache and closed, and —
 * for rolling session receivers — one more session is requested.
 *
 * @param scheduler Scheduler to publish received messages on; pushed back to availableSchedulers
 *     when the session stream finishes.
 * @param disposeOnIdle true to dispose of the session receiver when it idles; false otherwise.
 * @return A Flux of message contexts received from the session.
 */
private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) {
    return getActiveLink().flatMap(link -> link.getSessionId()
        .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> {
            // Reuse an existing receiver for this session id if one is already active.
            if (existing != null) {
                return existing;
            }
            return new ServiceBusSessionReceiver(link, messageSerializer,
                connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(),
                disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration);
        })))
        .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> {
            // Reviewer-requested wording: log the receiver being closed, not the scheduler pool.
            logger.verbose("Closing session receiver for session id [{}].",
                sessionReceiver.getSessionId());
            availableSchedulers.push(scheduler);
            sessionReceivers.remove(sessionReceiver.getSessionId());
            sessionReceiver.close();
            if (receiverOptions.isRollingSessionReceiver()) {
                onSessionRequest(1L);
            }
        }))
        .publishOn(scheduler, 1);
}
logger.verbose("Adding scheduler back to pool for session-id [{}].", sessionReceiver.getSessionId());
// Creates (or reuses, via sessionReceivers.compute) the session receiver for the next available
// session and publishes its messages on the given scheduler. On stream termination (doFinally):
// logs the close, returns the scheduler to the pool, removes and closes the receiver, and — for
// rolling session receivers — requests one more session. publishOn(scheduler, 1) limits prefetch to 1.
private Flux<ServiceBusMessageContext> getSession(Scheduler scheduler, boolean disposeOnIdle) { return getActiveLink().flatMap(link -> link.getSessionId() .map(sessionId -> sessionReceivers.compute(sessionId, (key, existing) -> { if (existing != null) { return existing; } return new ServiceBusSessionReceiver(link, messageSerializer, connectionProcessor.getRetryOptions(), receiverOptions.getPrefetchCount(), disposeOnIdle, scheduler, this::renewSessionLock, maxSessionLockRenewDuration); }))) .flatMapMany(sessionReceiver -> sessionReceiver.receive().doFinally(signalType -> { logger.verbose("Closing session receiver for session id [{}].", sessionReceiver.getSessionId()); availableSchedulers.push(scheduler); sessionReceivers.remove(sessionReceiver.getSessionId()); sessionReceiver.close(); if (receiverOptions.isRollingSessionReceiver()) { onSessionRequest(1L); } })) .publishOn(scheduler, 1); }
// Coordinates AMQP receive links for Service Bus sessions. Maintains a fixed pool of
// bounded-elastic schedulers (getMaxConcurrentSessions() of them for rolling receivers, else 1),
// maps session ids to ServiceBusSessionReceiver instances, and exposes receive() as either a
// single session stream or a merged Flux across concurrent sessions. getActiveLink() retries
// timeouts with a one-minute delay (SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION).
// NOTE(review): close() disposes schedulers before closing receivers — confirm ordering is intentional.
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private final ClientLogger logger = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); final int numberOfSchedulers = 
receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, ReceiverOptions receiverOptions) { this(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions, null); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? 
receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } for (Scheduler scheduler : schedulers) { scheduler.dispose(); } sessionReceivers.values().forEach(receiver -> receiver.close()); sessionReceiveSink.complete(); } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. 
* @throws AmqpException if the session manager is already disposed. */ Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .takeUntil(e -> e == AmqpEndpointState.ACTIVE) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); logger.info("entityPath[{}] attempt[{}]. Error occurred while getting unnamed session.", entityPath, signal.totalRetriesInARow(), failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); } /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { logger.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } logger.verbose("Requested {} unnamed sessions.", request); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { logger.verbose("request[{}]: There are no available schedulers to fetch.", request); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(logger, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(logger, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
// Coordinates AMQP receive links for Service Bus sessions. Maintains a fixed pool of
// bounded-elastic schedulers (getMaxConcurrentSessions() of them for rolling receivers, else 1),
// maps session ids to ServiceBusSessionReceiver instances, and exposes receive() as either a
// single session stream or a merged Flux across concurrent sessions. getActiveLink() retries
// timeouts with a one-minute delay (SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION).
// NOTE(review): close() disposes schedulers before closing receivers — confirm ordering is intentional.
class ServiceBusSessionManager implements AutoCloseable { private static final Duration SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION = Duration.ofMinutes(1); private final ClientLogger logger = new ClientLogger(ServiceBusSessionManager.class); private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusReceiveLink receiveLink; private final ServiceBusConnectionProcessor connectionProcessor; private final Duration operationTimeout; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicBoolean isStarted = new AtomicBoolean(); private final List<Scheduler> schedulers; private final Deque<Scheduler> availableSchedulers = new ConcurrentLinkedDeque<>(); private final Duration maxSessionLockRenewDuration; /** * SessionId to receiver mapping. */ private final ConcurrentHashMap<String, ServiceBusSessionReceiver> sessionReceivers = new ConcurrentHashMap<>(); private final EmitterProcessor<Flux<ServiceBusMessageContext>> processor; private final FluxSink<Flux<ServiceBusMessageContext>> sessionReceiveSink; private volatile Flux<ServiceBusMessageContext> receiveFlux; ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, ReceiverOptions receiverOptions, ServiceBusReceiveLink receiveLink) { this.entityPath = entityPath; this.entityType = entityType; this.receiverOptions = receiverOptions; this.connectionProcessor = connectionProcessor; this.operationTimeout = connectionProcessor.getRetryOptions().getTryTimeout(); this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.maxSessionLockRenewDuration = receiverOptions.getMaxLockRenewDuration(); final int numberOfSchedulers = 
receiverOptions.isRollingSessionReceiver() ? receiverOptions.getMaxConcurrentSessions() : 1; final List<Scheduler> schedulerList = IntStream.range(0, numberOfSchedulers) .mapToObj(index -> Schedulers.newBoundedElastic(DEFAULT_BOUNDED_ELASTIC_SIZE, DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, "receiver-" + index)) .collect(Collectors.toList()); this.schedulers = Collections.unmodifiableList(schedulerList); this.availableSchedulers.addAll(this.schedulers); this.processor = EmitterProcessor.create(numberOfSchedulers, false); this.sessionReceiveSink = processor.sink(); this.receiveLink = receiveLink; } ServiceBusSessionManager(String entityPath, MessagingEntityType entityType, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, ReceiverOptions receiverOptions) { this(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions, null); } /** * Gets the link name with the matching {@code sessionId}. * * @param sessionId Session id to get link name for. * * @return The name of the link, or {@code null} if there is no open link with that {@code sessionId}. */ String getLinkName(String sessionId) { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); return receiver != null ? receiver.getLinkName() : null; } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<byte[]> getSessionState(String sessionId) { return validateParameter(sessionId, "sessionId", "getSessionState").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? 
receiver.getLinkName() : null; return channel.getSessionState(sessionId, associatedLinkName); })); } /** * Gets a stream of messages from different sessions. * * @return A Flux of messages merged from different sessions. */ Flux<ServiceBusMessageContext> receive() { if (!isStarted.getAndSet(true)) { this.sessionReceiveSink.onRequest(this::onSessionRequest); if (!receiverOptions.isRollingSessionReceiver()) { receiveFlux = getSession(schedulers.get(0), false); } else { receiveFlux = Flux.merge(processor, receiverOptions.getMaxConcurrentSessions()); } } return receiveFlux; } /** * Renews the session lock. * * @param sessionId Identifier of session to get. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ Mono<OffsetDateTime> renewSessionLock(String sessionId) { return validateParameter(sessionId, "sessionId", "renewSessionLock").then( getManagementNode().flatMap(channel -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); final String associatedLinkName = receiver != null ? receiver.getLinkName() : null; return channel.renewSessionLock(sessionId, associatedLinkName).handle((offsetDateTime, sink) -> { if (receiver != null) { receiver.setSessionLockedUntil(offsetDateTime); } sink.next(offsetDateTime); }); })); } /** * Tries to update the message disposition on a session aware receive link. * * @return {@code true} if the {@code lockToken} was updated on receive link. {@code false} otherwise. This means * there isn't an open link with that {@code sessionId}. 
*/ Mono<Boolean> updateDisposition(String lockToken, String sessionId, DispositionStatus dispositionStatus, Map<String, Object> propertiesToModify, String deadLetterReason, String deadLetterDescription, ServiceBusTransactionContext transactionContext) { final String operation = "updateDisposition"; return Mono.when( validateParameter(lockToken, "lockToken", operation), validateParameter(lockToken, "lockToken", operation), validateParameter(sessionId, "'sessionId'", operation)).then( Mono.defer(() -> { final ServiceBusSessionReceiver receiver = sessionReceivers.get(sessionId); if (receiver == null || !receiver.containsLockToken(lockToken)) { return Mono.just(false); } final DeliveryState deliveryState = MessageUtils.getDeliveryState(dispositionStatus, deadLetterReason, deadLetterDescription, propertiesToModify, transactionContext); return receiver.updateDisposition(lockToken, deliveryState).thenReturn(true); })); } @Override public void close() { if (isDisposed.getAndSet(true)) { return; } for (Scheduler scheduler : schedulers) { scheduler.dispose(); } sessionReceivers.values().forEach(receiver -> receiver.close()); sessionReceiveSink.complete(); } private AmqpErrorContext getErrorContext() { return new SessionErrorContext(connectionProcessor.getFullyQualifiedNamespace(), entityPath); } /** * Creates an session receive link. * * @return A Mono that completes with an session receive link. */ private Mono<ServiceBusReceiveLink> createSessionReceiveLink() { final String sessionId = receiverOptions.getSessionId(); final String linkName = (sessionId != null) ? sessionId : StringUtil.getRandomString("session-"); return connectionProcessor .flatMap(connection -> { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, sessionId); }); } /** * Gets an active unnamed session link. * * @return A Mono that completes when an unnamed session becomes available. 
* @throws AmqpException if the session manager is already disposed. */ Mono<ServiceBusReceiveLink> getActiveLink() { if (this.receiveLink != null) { return Mono.just(this.receiveLink); } return Mono.defer(() -> createSessionReceiveLink() .flatMap(link -> link.getEndpointStates() .takeUntil(e -> e == AmqpEndpointState.ACTIVE) .timeout(operationTimeout) .then(Mono.just(link)))) .retryWhen(Retry.from(retrySignals -> retrySignals.flatMap(signal -> { final Throwable failure = signal.failure(); logger.info("entityPath[{}] attempt[{}]. Error occurred while getting unnamed session.", entityPath, signal.totalRetriesInARow(), failure); if (isDisposed.get()) { return Mono.<Long>error(new AmqpException(false, "SessionManager is already disposed.", failure, getErrorContext())); } else if (failure instanceof TimeoutException) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else if (failure instanceof AmqpException && ((AmqpException) failure).getErrorCondition() == AmqpErrorCondition.TIMEOUT_ERROR) { return Mono.delay(SLEEP_DURATION_ON_ACCEPT_SESSION_EXCEPTION); } else { return Mono.<Long>error(failure); } }))); } /** * Gets the next available unnamed session with the given receive options and publishes its contents on the given * {@code scheduler}. * * @param scheduler Scheduler to coordinate received methods on. * @param disposeOnIdle true to dispose receiver when it idles; false otherwise. * @return A Mono that completes with an unnamed session receiver. */ private Mono<ServiceBusManagementNode> getManagementNode() { return connectionProcessor.flatMap(connection -> connection.getManagementNode(entityPath, entityType)); } /** * Emits a new unnamed active session when it becomes available. * * @param request Number of unnamed active sessions to emit. */ private void onSessionRequest(long request) { if (isDisposed.get()) { logger.info("Session manager is disposed. 
Not emitting more unnamed sessions."); return; } logger.verbose("Requested {} unnamed sessions.", request); for (int i = 0; i < request; i++) { final Scheduler scheduler = availableSchedulers.poll(); if (scheduler == null) { if (request != Long.MAX_VALUE) { logger.verbose("request[{}]: There are no available schedulers to fetch.", request); } return; } Flux<ServiceBusMessageContext> session = getSession(scheduler, true); sessionReceiveSink.next(session); } } private <T> Mono<Void> validateParameter(T parameter, String parameterName, String operation) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, operation))); } else if (parameter == null) { return monoError(logger, new NullPointerException(String.format("'%s' cannot be null.", parameterName))); } else if ((parameter instanceof String) && (((String) parameter).isEmpty())) { return monoError(logger, new IllegalArgumentException(String.format("'%s' cannot be an empty string.", parameterName))); } else { return Mono.empty(); } } }
Add `Assertions.fail()` after the `getById` call, so the test fails explicitly if no exception is thrown.
public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword("abc!@ .create(); VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); }
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword("abc!@ .create(); VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_SOUTH_CENTRAL; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) 
.withRootUsername("Foo12") .withRootPassword(password()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState 
powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 
defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") 
.withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) 
.withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = 
foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." + " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) 
.withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); 
Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); 
Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; 
Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withRootPassword(password()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", 
"datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withRootPassword(password()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } 
Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withRootPassword("afh123RVS!") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags, virtualMachine.innerModel().tags()); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withRootPassword("afh123RVS!") .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = 
virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withRootPassword("afh123RVS!") .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); ResourceManagerUtils.sleep(Duration.ofMinutes(30)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() == 0); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() 
.define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withRootPassword("BaR@12! .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_SOUTH_CENTRAL; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) 
.withRootUsername("Foo12") .withRootPassword(password()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState 
powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? 
defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") 
.withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) 
.withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = 
foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." + " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) 
.withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); 
Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); 
Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; 
Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withRootPassword(password()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", 
"datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withRootPassword(password()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } 
Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withRootPassword("afh123RVS!") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags, virtualMachine.innerModel().tags()); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withRootPassword("afh123RVS!") .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = 
virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withRootPassword("afh123RVS!") .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); ResourceManagerUtils.sleep(Duration.ofMinutes(30)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() == 0); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() 
.define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withRootPassword("BaR@12! .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
Can we always expect `endpoint.serverKey().getAuthority()` to match the `physicalAddress.getAuthority()` value that `get(URI)` uses as the map key? If the two authorities can ever differ for the same endpoint, `evict` would miss the map entry and the endpoint would leak in `endpoints`.
/**
 * Removes the given endpoint from this provider's {@code endpoints} map and, when an entry
 * was actually present, increments the {@code evictions} counter.
 *
 * NOTE(review): the map is populated in {@code get(URI)} keyed by
 * {@code physicalAddress.getAuthority()}, but removal here keys by
 * {@code endpoint.serverKey().getAuthority()}. This assumes the two authorities always match
 * for a given endpoint — TODO confirm; if they can differ, an evicted endpoint would remain
 * in the map.
 */
private void evict(final RntbdEndpoint endpoint) {
    // remove returns the previous value; non-null means the endpoint was still registered,
    // so only then does this count as an eviction.
    if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
        this.evictions.incrementAndGet();
    }
}
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
/**
 * Evicts the given endpoint: removes it from the {@code endpoints} map and bumps the
 * {@code evictions} counter only when the endpoint was still registered (i.e. the map
 * actually held an entry under its authority).
 *
 * NOTE(review): removal keys by {@code endpoint.serverKey().getAuthority()} while insertion
 * (in the provider's {@code get(URI)}) keys by {@code physicalAddress.getAuthority()};
 * presumably these are always equal for the same endpoint — verify against how the server
 * key is derived from the physical address.
 */
private void evict(final RntbdEndpoint endpoint) {
    if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
        this.evictions.incrementAndGet();
    }
}
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
If not mistaken: ` this.serverKey = RntbdUtils.getServerKey(physicalAddress); ` So it might be required to change at line 539 to match this expected computation: ` return endpoints.computeIfAbsent(RntbdUtils.getServerKey(physicalAddress).getAuthority(), authority -> new RntbdServiceEndpoint(`
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
Good observation , I noticed that as well, RntbdUtils.getServerKey(physicalAddress) build the uri with scheme, host and port, ignoring rest. So getAuthority for both return the same host and port. It is covered in the test case. However we could still create extra URI as you mentioned in 539, but it will be redundant object creation.
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
Can you add a comment at line 539 with this explanation, why we choose this and avoid the creation of the redundant object? Otherwise from an outsider perspective it still looks like a potential mismatch :-)
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
Is there a test to cover it E2E set-up?
public Builder(ConnectionPolicy connectionPolicy) { this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize; this.connectionAcquisitionTimeout = DEFAULT_OPTIONS.connectionAcquisitionTimeout; this.connectionEndpointRediscoveryEnabled = connectionPolicy.isTcpConnectionEndpointRediscoveryEnabled(); this.connectTimeout = connectionPolicy.getConnectTimeout(); this.idleChannelTimeout = connectionPolicy.getIdleTcpConnectionTimeout(); this.idleChannelTimerResolution = DEFAULT_OPTIONS.idleChannelTimerResolution; this.idleEndpointTimeout = connectionPolicy.getIdleTcpEndpointTimeout(); this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity; this.maxChannelsPerEndpoint = connectionPolicy.getMaxConnectionsPerEndpoint(); this.maxRequestsPerChannel = connectionPolicy.getMaxRequestsPerConnection(); this.maxConcurrentRequestsPerEndpointOverride = DEFAULT_OPTIONS.maxConcurrentRequestsPerEndpointOverride; this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime; this.requestTimeout = connectionPolicy.getRequestTimeout(); this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution; this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime; this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout; this.threadCount = DEFAULT_OPTIONS.threadCount; this.userAgent = DEFAULT_OPTIONS.userAgent; }
this.idleEndpointTimeout = connectionPolicy.getIdleTcpEndpointTimeout();
public Builder(ConnectionPolicy connectionPolicy) { this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize; this.connectionAcquisitionTimeout = DEFAULT_OPTIONS.connectionAcquisitionTimeout; this.connectionEndpointRediscoveryEnabled = connectionPolicy.isTcpConnectionEndpointRediscoveryEnabled(); this.connectTimeout = connectionPolicy.getConnectTimeout(); this.idleChannelTimeout = connectionPolicy.getIdleTcpConnectionTimeout(); this.idleChannelTimerResolution = DEFAULT_OPTIONS.idleChannelTimerResolution; this.idleEndpointTimeout = connectionPolicy.getIdleTcpEndpointTimeout(); this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity; this.maxChannelsPerEndpoint = connectionPolicy.getMaxConnectionsPerEndpoint(); this.maxRequestsPerChannel = connectionPolicy.getMaxRequestsPerConnection(); this.maxConcurrentRequestsPerEndpointOverride = DEFAULT_OPTIONS.maxConcurrentRequestsPerEndpointOverride; this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime; this.requestTimeout = connectionPolicy.getRequestTimeout(); this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution; this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime; this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout; this.threadCount = DEFAULT_OPTIONS.threadCount; this.userAgent = DEFAULT_OPTIONS.userAgent; }
class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { logger.info("Using default Direct TCP options: {}", DEFAULT_OPTIONS_PROPERTY_NAME); DEFAULT_OPTIONS = new Options(ConnectionPolicy.getDefaultPolicy()); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionAcquisitionTimeout; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleChannelTimeout; private Duration idleChannelTimerResolution; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private int 
maxConcurrentRequestsPerEndpointOverride; private Duration receiveHangDetectionTime; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private int threadCount; private UserAgentContainer userAgent; public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionAcquisitionTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.connectionAcquisitionTimeout = value.compareTo(Duration.ZERO) < 0 ? Duration.ZERO : value; return this; } public Builder connectionEndpointRediscoveryEnabled(final boolean value) { this.connectionEndpointRediscoveryEnabled = value; return this; } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleChannelTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) <= 0, "expected positive value, not %s", value); this.idleChannelTimerResolution = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & 
(value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder maxConcurrentRequestsPerEndpointOverride(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxConcurrentRequestsPerEndpointOverride = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder threadCount(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.threadCount = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } }
class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { logger.info("Using default Direct TCP options: {}", DEFAULT_OPTIONS_PROPERTY_NAME); DEFAULT_OPTIONS = new Options(ConnectionPolicy.getDefaultPolicy()); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionAcquisitionTimeout; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleChannelTimeout; private Duration idleChannelTimerResolution; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private int 
maxConcurrentRequestsPerEndpointOverride; private Duration receiveHangDetectionTime; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private int threadCount; private UserAgentContainer userAgent; public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionAcquisitionTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.connectionAcquisitionTimeout = value.compareTo(Duration.ZERO) < 0 ? Duration.ZERO : value; return this; } public Builder connectionEndpointRediscoveryEnabled(final boolean value) { this.connectionEndpointRediscoveryEnabled = value; return this; } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleChannelTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) <= 0, "expected positive value, not %s", value); this.idleChannelTimerResolution = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & 
(value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder maxConcurrentRequestsPerEndpointOverride(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxConcurrentRequestsPerEndpointOverride = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder threadCount(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.threadCount = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } }
Yes — a newly added test covers this.
// Seeds the builder from a ConnectionPolicy: connection-level settings come from the policy,
// while transport tuning knobs fall back to the process-wide DEFAULT_OPTIONS.
public Builder(ConnectionPolicy policy) {
    this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize;
    this.connectionAcquisitionTimeout = DEFAULT_OPTIONS.connectionAcquisitionTimeout;
    this.connectionEndpointRediscoveryEnabled = policy.isTcpConnectionEndpointRediscoveryEnabled();
    this.connectTimeout = policy.getConnectTimeout();
    this.idleChannelTimeout = policy.getIdleTcpConnectionTimeout();
    this.idleChannelTimerResolution = DEFAULT_OPTIONS.idleChannelTimerResolution;
    this.idleEndpointTimeout = policy.getIdleTcpEndpointTimeout();
    this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity;
    this.maxChannelsPerEndpoint = policy.getMaxConnectionsPerEndpoint();
    this.maxRequestsPerChannel = policy.getMaxRequestsPerConnection();
    this.maxConcurrentRequestsPerEndpointOverride = DEFAULT_OPTIONS.maxConcurrentRequestsPerEndpointOverride;
    this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime;
    this.requestTimeout = policy.getRequestTimeout();
    this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution;
    this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime;
    this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout;
    this.threadCount = DEFAULT_OPTIONS.threadCount;
    this.userAgent = DEFAULT_OPTIONS.userAgent;
}
this.idleEndpointTimeout = connectionPolicy.getIdleTcpEndpointTimeout();
// Seeds the builder from a ConnectionPolicy: connection-level settings come from the policy,
// while transport tuning knobs fall back to the process-wide DEFAULT_OPTIONS.
public Builder(ConnectionPolicy policy) {
    this.bufferPageSize = DEFAULT_OPTIONS.bufferPageSize;
    this.connectionAcquisitionTimeout = DEFAULT_OPTIONS.connectionAcquisitionTimeout;
    this.connectionEndpointRediscoveryEnabled = policy.isTcpConnectionEndpointRediscoveryEnabled();
    this.connectTimeout = policy.getConnectTimeout();
    this.idleChannelTimeout = policy.getIdleTcpConnectionTimeout();
    this.idleChannelTimerResolution = DEFAULT_OPTIONS.idleChannelTimerResolution;
    this.idleEndpointTimeout = policy.getIdleTcpEndpointTimeout();
    this.maxBufferCapacity = DEFAULT_OPTIONS.maxBufferCapacity;
    this.maxChannelsPerEndpoint = policy.getMaxConnectionsPerEndpoint();
    this.maxRequestsPerChannel = policy.getMaxRequestsPerConnection();
    this.maxConcurrentRequestsPerEndpointOverride = DEFAULT_OPTIONS.maxConcurrentRequestsPerEndpointOverride;
    this.receiveHangDetectionTime = DEFAULT_OPTIONS.receiveHangDetectionTime;
    this.requestTimeout = policy.getRequestTimeout();
    this.requestTimerResolution = DEFAULT_OPTIONS.requestTimerResolution;
    this.sendHangDetectionTime = DEFAULT_OPTIONS.sendHangDetectionTime;
    this.shutdownTimeout = DEFAULT_OPTIONS.shutdownTimeout;
    this.threadCount = DEFAULT_OPTIONS.threadCount;
    this.userAgent = DEFAULT_OPTIONS.userAgent;
}
class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { logger.info("Using default Direct TCP options: {}", DEFAULT_OPTIONS_PROPERTY_NAME); DEFAULT_OPTIONS = new Options(ConnectionPolicy.getDefaultPolicy()); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionAcquisitionTimeout; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleChannelTimeout; private Duration idleChannelTimerResolution; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private int 
maxConcurrentRequestsPerEndpointOverride; private Duration receiveHangDetectionTime; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private int threadCount; private UserAgentContainer userAgent; public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionAcquisitionTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.connectionAcquisitionTimeout = value.compareTo(Duration.ZERO) < 0 ? Duration.ZERO : value; return this; } public Builder connectionEndpointRediscoveryEnabled(final boolean value) { this.connectionEndpointRediscoveryEnabled = value; return this; } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleChannelTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) <= 0, "expected positive value, not %s", value); this.idleChannelTimerResolution = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & 
(value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder maxConcurrentRequestsPerEndpointOverride(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxConcurrentRequestsPerEndpointOverride = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder threadCount(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.threadCount = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } }
class Builder { private static final String DEFAULT_OPTIONS_PROPERTY_NAME = "azure.cosmos.directTcp.defaultOptions"; private static final Options DEFAULT_OPTIONS; static { Options options = null; try { final String string = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME); if (string != null) { try { options = RntbdObjectMapper.readValue(string, Options.class); } catch (IOException error) { logger.error("failed to parse default Direct TCP options {} due to ", string, error); } } if (options == null) { final String path = System.getProperty(DEFAULT_OPTIONS_PROPERTY_NAME + "File"); if (path != null) { try { options = RntbdObjectMapper.readValue(new File(path), Options.class); } catch (IOException error) { logger.error("failed to load default Direct TCP options from {} due to ", path, error); } } } if (options == null) { final ClassLoader loader = RntbdTransportClient.class.getClassLoader(); final String name = DEFAULT_OPTIONS_PROPERTY_NAME + ".json"; try (InputStream stream = loader.getResourceAsStream(name)) { if (stream != null) { options = RntbdObjectMapper.readValue(stream, Options.class); } } catch (IOException error) { logger.error("failed to load Direct TCP options from resource {} due to ", name, error); } } } finally { if (options == null) { logger.info("Using default Direct TCP options: {}", DEFAULT_OPTIONS_PROPERTY_NAME); DEFAULT_OPTIONS = new Options(ConnectionPolicy.getDefaultPolicy()); } else { logger.info("Updated default Direct TCP options from system property {}: {}", DEFAULT_OPTIONS_PROPERTY_NAME, options); DEFAULT_OPTIONS = options; } } } private int bufferPageSize; private Duration connectionAcquisitionTimeout; private boolean connectionEndpointRediscoveryEnabled; private Duration connectTimeout; private Duration idleChannelTimeout; private Duration idleChannelTimerResolution; private Duration idleEndpointTimeout; private int maxBufferCapacity; private int maxChannelsPerEndpoint; private int maxRequestsPerChannel; private int 
maxConcurrentRequestsPerEndpointOverride; private Duration receiveHangDetectionTime; private Duration requestTimeout; private Duration requestTimerResolution; private Duration sendHangDetectionTime; private Duration shutdownTimeout; private int threadCount; private UserAgentContainer userAgent; public Builder bufferPageSize(final int value) { checkArgument(value >= 4096 && (value & (value - 1)) == 0, "expected value to be a power of 2 >= 4096, not %s", value); this.bufferPageSize = value; return this; } public Options build() { checkState(this.bufferPageSize <= this.maxBufferCapacity, "expected bufferPageSize (%s) <= maxBufferCapacity (%s)", this.bufferPageSize, this.maxBufferCapacity); return new Options(this); } public Builder connectionAcquisitionTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.connectionAcquisitionTimeout = value.compareTo(Duration.ZERO) < 0 ? Duration.ZERO : value; return this; } public Builder connectionEndpointRediscoveryEnabled(final boolean value) { this.connectionEndpointRediscoveryEnabled = value; return this; } public Builder connectionTimeout(final Duration value) { checkArgument(value == null || value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.connectTimeout = value; return this; } public Builder idleChannelTimeout(final Duration value) { checkNotNull(value, "expected non-null value"); this.idleChannelTimeout = value; return this; } public Builder idleChannelTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) <= 0, "expected positive value, not %s", value); this.idleChannelTimerResolution = value; return this; } public Builder idleEndpointTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.idleEndpointTimeout = value; return this; } public Builder maxBufferCapacity(final int value) { checkArgument(value > 0 && (value & 
(value - 1)) == 0, "expected positive value, not %s", value); this.maxBufferCapacity = value; return this; } public Builder maxChannelsPerEndpoint(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxChannelsPerEndpoint = value; return this; } public Builder maxRequestsPerChannel(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxRequestsPerChannel = value; return this; } public Builder maxConcurrentRequestsPerEndpointOverride(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.maxConcurrentRequestsPerEndpointOverride = value; return this; } public Builder receiveHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.receiveHangDetectionTime = value; return this; } public Builder requestTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimeout = value; return this; } public Builder requestTimerResolution(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.requestTimerResolution = value; return this; } public Builder sendHangDetectionTime(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.sendHangDetectionTime = value; return this; } public Builder shutdownTimeout(final Duration value) { checkArgument(value != null && value.compareTo(Duration.ZERO) > 0, "expected positive value, not %s", value); this.shutdownTimeout = value; return this; } public Builder threadCount(final int value) { checkArgument(value > 0, "expected positive value, not %s", value); this.threadCount = value; return this; } public Builder userAgent(final UserAgentContainer value) { checkNotNull(value, 
"expected non-null value"); this.userAgent = value; return this; } }
This is the only place in the code where we use URI.getAuthority(). In one spot we call RntbdUtils.getServerKey(physicalAddress).getAuthority() and in the other physicalAddress.getAuthority(); both return the same value. We could use RntbdUtils.getServerKey() in both places, but that would come at the extra cost of creating a redundant URI object. So I think we are good here, unless someone thinks otherwise.
// Removes the endpoint registered under its server authority; successful removals are counted
// in the evictions metric.
private void evict(final RntbdEndpoint endpoint) {
    final RntbdEndpoint removed = this.endpoints.remove(endpoint.serverKey().getAuthority());
    if (removed != null) {
        this.evictions.incrementAndGet();
    }
}
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
// Removes the endpoint registered under its server authority; successful removals are counted
// in the evictions metric.
private void evict(final RntbdEndpoint endpoint) {
    final RntbdEndpoint removed = this.endpoints.remove(endpoint.serverKey().getAuthority());
    if (removed != null) {
        this.evictions.incrementAndGet();
    }
}
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
Can this then be limited to a single implementation used in both places, or are both needed?
// Removes the endpoint registered under its server authority; successful removals are counted
// in the evictions metric.
private void evict(final RntbdEndpoint endpoint) {
    final RntbdEndpoint removed = this.endpoints.remove(endpoint.serverKey().getAuthority());
    if (removed != null) {
        this.evictions.incrementAndGet();
    }
}
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
// Removes the endpoint registered under its server authority; successful removals are counted
// in the evictions metric.
private void evict(final RntbdEndpoint endpoint) {
    final RntbdEndpoint removed = this.endpoints.remove(endpoint.serverKey().getAuthority());
    if (removed != null) {
        this.evictions.incrementAndGet();
    }
}
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
As mentioned above, one implementation would incur an extra URI object creation on every request
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
Talked offline; the current approach is right
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) {
private void evict(final RntbdEndpoint endpoint) { if (this.endpoints.remove(endpoint.serverKey().getAuthority()) != null) { this.evictions.incrementAndGet(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
class Provider implements RntbdEndpoint.Provider { private static final Logger logger = LoggerFactory.getLogger(Provider.class); private final AtomicBoolean closed; private final Config config; private final ConcurrentHashMap<String, RntbdEndpoint> endpoints; private final NioEventLoopGroup eventLoopGroup; private final AtomicInteger evictions; private final RntbdEndpointMonitoringProvider monitoring; private final RntbdRequestTimer requestTimer; private final RntbdTransportClient transportClient; private final IAddressResolver addressResolver; public Provider( final RntbdTransportClient transportClient, final Options options, final SslContext sslContext, final IAddressResolver addressResolver) { checkNotNull(transportClient, "expected non-null provider"); checkNotNull(options, "expected non-null options"); checkNotNull(sslContext, "expected non-null sslContext"); final DefaultThreadFactory threadFactory = new DefaultThreadFactory("cosmos-rntbd-nio", true); final LogLevel wireLogLevel; if (logger.isDebugEnabled()) { wireLogLevel = LogLevel.TRACE; } else { wireLogLevel = null; } this.addressResolver = addressResolver; this.transportClient = transportClient; this.config = new Config(options, sslContext, wireLogLevel); this.requestTimer = new RntbdRequestTimer( config.requestTimeoutInNanos(), config.requestTimerResolutionInNanos()); this.eventLoopGroup = new NioEventLoopGroup(options.threadCount(), threadFactory); this.endpoints = new ConcurrentHashMap<>(); this.evictions = new AtomicInteger(); this.closed = new AtomicBoolean(); this.monitoring = new RntbdEndpointMonitoringProvider(this); this.monitoring.init(); } @Override public void close() { if (this.closed.compareAndSet(false, true)) { this.monitoring.close(); for (final RntbdEndpoint endpoint : this.endpoints.values()) { endpoint.close(); } this.eventLoopGroup.shutdownGracefully(QUIET_PERIOD, this.config.shutdownTimeoutInNanos(), NANOSECONDS) .addListener(future -> { this.requestTimer.close(); if 
(future.isSuccess()) { logger.debug("\n [{}]\n closed endpoints", this); return; } logger.error("\n [{}]\n failed to close endpoints due to ", this, future.cause()); }); return; } logger.debug("\n [{}]\n already closed", this); } @Override public Config config() { return this.config; } @Override public int count() { return this.endpoints.size(); } @Override public int evictions() { return this.evictions.get(); } @Override public RntbdEndpoint get(final URI physicalAddress) { return endpoints.computeIfAbsent(physicalAddress.getAuthority(), authority -> new RntbdServiceEndpoint( this, this.config, this.eventLoopGroup, this.requestTimer, physicalAddress)); } @Override public IAddressResolver getAddressResolver() { return this.addressResolver; } @Override public Stream<RntbdEndpoint> list() { return this.endpoints.values().stream(); } }
This would fail for serverless accounts. It is worth checking with Thomas Weiss what they want to happen in this case — for example, whether the backend has introduced any way in the meantime for us to retrieve the max throughput allowed for serverless. We don't want to hard-code it in the SDK, so we would need some way to get it from the gateway backend to make this work for serverless. Explicitly non-blocking — meaning feel free to merge the PR even without resolving this.
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions())
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final CancellationTokenSource cancellationTokenSource; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final Scheduler scheduler; private final CosmosAsyncContainer targetContainer; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { 
checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> 
groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.close(); } logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController 
-> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token 
can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
Thanks @FabianMeiswinkel — I missed this point completely. For now I have created a GitHub issue to track it and will sync with Thomas on this one.
/**
 * Resolves the offer (provisioned throughput) document for the given resource id.
 * <p>
 * Queries the offers for {@code resourceId}; if exactly one page with at least one
 * result comes back, the full offer document is read via its self link and mapped
 * into a {@link ThroughputResponse}. If no offer exists, a CosmosException with
 * {@code NO_OFFER_EXCEPTION_STATUS_CODE} is emitted.
 *
 * @param resourceId the rid of the container or database whose offer is looked up; must be non-empty.
 * @return a Mono emitting the resolved throughput response, or an error if no offer is configured.
 */
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) {
    checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty");

    return this.client
        .queryOffers(resourceId, new CosmosQueryRequestOptions())
        .single()
        .flatMap(feedResponse -> {
            if (!feedResponse.getResults().isEmpty()) {
                // Follow the self link to read the complete offer document.
                return this.client.readOffer(feedResponse.getResults().get(0).getSelfLink()).single();
            }
            return Mono.error(
                BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE,
                    "No offers found for the resource " + resourceId));
        })
        .map(ModelBridgeInternal::createThroughputRespose);
}
return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions())
/**
 * Resolves the offer (provisioned throughput) document for the given resource id.
 * <p>
 * Builds an offer query spec from the target container and the resource rid, runs it,
 * and — when a result is found — reads the full offer document through its self link,
 * mapping it into a {@link ThroughputResponse}. If no offer exists, a CosmosException
 * with {@code NO_OFFER_EXCEPTION_STATUS_CODE} is emitted.
 *
 * @param resourceId the rid of the container or database whose offer is looked up; must be non-empty.
 * @return a Mono emitting the resolved throughput response, or an error if no offer is configured.
 */
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) {
    checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty");

    return this.client
        .queryOffers(
            BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId),
            new CosmosQueryRequestOptions())
        .single()
        .flatMap(feedResponse -> {
            if (!feedResponse.getResults().isEmpty()) {
                // Follow the self link to read the complete offer document.
                return this.client.readOffer(feedResponse.getResults().get(0).getSelfLink()).single();
            }
            return Mono.error(
                BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE,
                    "No offers found for the resource " + resourceId));
        })
        .map(ModelBridgeInternal::createThroughputRespose);
}
/**
 * Container-level throughput control controller.
 * Resolves the rids and (when any group uses a relative threshold) the max provisioned
 * throughput of the target container or its database, creates one controller per
 * throughput control group, and routes each request to the matching group controller.
 */
class ThroughputContainerController implements IThroughputContainerController {
    private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class);
    // Interval at which the background task re-reads the container max throughput.
    private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15);
    // Status/sub-status pair that identifies "no offer configured for this resource".
    private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST;
    private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN;

    private final AsyncDocumentClient client;
    private final CancellationTokenSource cancellationTokenSource;
    private final ConnectionMode connectionMode;
    private final GlobalEndpointManager globalEndpointManager;
    // Group name -> group controller; populated in createAndInitializeGroupControllers().
    private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers;
    private final List<ThroughputControlGroup> groups;
    private final AtomicReference<Integer> maxContainerThroughput;
    private final RxPartitionKeyRangeCache partitionKeyRangeCache;
    private final Scheduler scheduler;
    private final CosmosAsyncContainer targetContainer;

    // Set after init(); used when a request names no group or an unknown group.
    private ThroughputGroupControllerBase defaultGroupController;
    private String targetContainerRid;
    private String targetDatabaseRid;
    private ThroughputResolveLevel throughputResolveLevel;

    public ThroughputContainerController(
        ConnectionMode connectionMode,
        GlobalEndpointManager globalEndpointManager,
        List<ThroughputControlGroup> groups,
        RxPartitionKeyRangeCache partitionKeyRangeCache) {

        checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null");
        checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty");
        checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null");

        this.connectionMode = connectionMode;
        this.globalEndpointManager = globalEndpointManager;
        this.groupControllers = new ConcurrentHashMap<>();
        this.groups = groups;
        this.maxContainerThroughput = new AtomicReference<>();
        this.partitionKeyRangeCache = partitionKeyRangeCache;
        // The first group's container is used as the target — presumably all groups
        // share one target container; TODO confirm this invariant upstream.
        this.targetContainer = groups.get(0).getTargetContainer();
        this.client = CosmosBridgeInternal.getContextClient(this.targetContainer);
        this.throughputResolveLevel = this.getThroughputResolveLevel(groups);
        this.cancellationTokenSource = new CancellationTokenSource();
        this.scheduler = Schedulers.elastic();
    }

    // Throughput only needs to be resolved when some group uses a relative threshold.
    private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) {
        if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) {
            return ThroughputResolveLevel.CONTAINER;
        } else {
            return ThroughputResolveLevel.NONE;
        }
    }

    /**
     * Resolves database/container rids and the max throughput, creates and initializes
     * the group controllers, picks the default group, and starts the background refresh
     * task on the elastic scheduler. Returns this controller.
     */
    @Override
    @SuppressWarnings("unchecked")
    public <T> Mono<T> init() {
        return this.resolveDatabaseResourceId()
            .flatMap(controller -> this.resolveContainerResourceId())
            .flatMap(controller -> this.resolveContainerMaxThroughput())
            .flatMap(controller -> this.createAndInitializeGroupControllers())
            .doOnSuccess(controller -> {
                this.setDefaultGroupController();
                scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe());
            })
            .thenReturn((T) this);
    }

    // Caches the database rid for the database-level offer lookup fallback.
    private Mono<ThroughputContainerController> resolveDatabaseResourceId() {
        return this.targetContainer.getDatabase().read()
            .flatMap(response -> {
                this.targetDatabaseRid = response.getProperties().getResourceId();
                return Mono.just(this);
            });
    }

    // Caches the container rid, also used by canHandleRequest().
    private Mono<ThroughputContainerController> resolveContainerResourceId() {
        return this.targetContainer.read()
            .flatMap(response -> {
                this.targetContainerRid = response.getProperties().getResourceId();
                return Mono.just(this);
            });
    }

    /**
     * Resolves the container (or, as fallback, database) max throughput and stores it in
     * {@code maxContainerThroughput}. On a "no offer configured" error the resolve level
     * is flipped CONTAINER <-> DATABASE and the retryWhen below resubscribes once.
     */
    private Mono<ThroughputContainerController> resolveContainerMaxThroughput() {
        // Mono.defer is deliberate: the retryWhen resubscription must re-read
        // throughputResolveLevel after the flip performed in the error handlers.
        return Mono.defer(() -> Mono.just(this.throughputResolveLevel))
            .flatMap(throughputResolveLevel -> {
                if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) {
                    return this.resolveThroughputByResourceId(this.targetContainerRid)
                        .onErrorResume(throwable -> {
                            if (this.isOfferNotConfiguredException(throwable)) {
                                this.throughputResolveLevel = ThroughputResolveLevel.DATABASE;
                            }
                            return Mono.error(throwable);
                        });
                } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) {
                    return this.resolveThroughputByResourceId(this.targetDatabaseRid)
                        .onErrorResume(throwable -> {
                            if (this.isOfferNotConfiguredException(throwable)) {
                                this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER;
                            }
                            return Mono.error(throwable);
                        });
                }
                return Mono.empty();
            })
            .flatMap(throughputResponse -> {
                this.updateMaxContainerThroughput(throughputResponse);
                return Mono.empty();
            })
            .retryWhen(
                RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable))
            ).thenReturn(this);
    }

    // At most one group may be flagged use-by-default; zero leaves defaultGroupController null.
    private void setDefaultGroupController() {
        List<ThroughputGroupControllerBase> defaultGroupControllers =
            this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList());

        if (defaultGroupControllers.size() > 1) {
            throw new IllegalArgumentException("There should only be one default throughput control group");
        }
        if (defaultGroupControllers.size() == 1) {
            this.defaultGroupController = defaultGroupControllers.get(0);
        }
    }

    // Max of autoscale max throughput and manual throughput from the offer response.
    private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) {
        checkNotNull(throughputResponse, "Throughput response can not be null");

        ThroughputProperties throughputProperties = throughputResponse.getProperties();
        this.maxContainerThroughput.set(
            Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput()));
    }

    // True when the error carries the "no offer configured" status/sub-status pair.
    private boolean isOfferNotConfiguredException(Throwable throwable) {
        checkNotNull(throwable, "Throwable should not be null");

        CosmosException cosmosException = Utils.as(throwable, CosmosException.class);
        return cosmosException != null
            && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
            && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
    }

    /**
     * Routes the request to the controller of its named group; falls back to the default
     * group controller for an unnamed or unknown group, and to the original request mono
     * when no default exists.
     */
    @Override
    public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) {
        checkNotNull(request, "Request can not be null");
        checkNotNull(originalRequestMono, "Original request mono can not be null");

        return Mono.just(request)
            .flatMap(request1 -> {
                if (request1.getThroughputControlGroupName() == null) {
                    return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController));
                } else {
                    return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName()))
                        .defaultIfEmpty(this.defaultGroupController)
                        .map(Utils.ValueHolder::new);
                }
            })
            .flatMap(groupController -> {
                if (groupController.v != null) {
                    return groupController.v.processRequest(request, originalRequestMono);
                }
                return originalRequestMono;
            });
    }

    public String getTargetContainerRid() {
        return this.targetContainerRid;
    }

    // This controller only handles requests resolved to its own container rid.
    @Override
    public boolean canHandleRequest(RxDocumentServiceRequest request) {
        checkNotNull(request, "Request can not be null");
        return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid);
    }

    // Creates (idempotently, via computeIfAbsent) and initializes one controller per group.
    private Mono<ThroughputContainerController> createAndInitializeGroupControllers() {
        return Flux.fromIterable(this.groups)
            .flatMap(group -> {
                ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent(
                    group.getGroupName(),
                    groupName -> ThroughputGroupControllerFactory.createController(
                        this.connectionMode,
                        this.globalEndpointManager,
                        group,
                        this.maxContainerThroughput.get(),
                        this.partitionKeyRangeCache,
                        this.targetContainerRid));
                return Mono.just(groupController);
            })
            .flatMap(groupController -> groupController.init())
            .then(Mono.just(this));
    }

    /**
     * Background loop: every DEFAULT_THROUGHPUT_REFRESH_INTERVAL re-resolves the max
     * throughput and pushes it to every group controller, until the token is cancelled.
     * No-op when no group needs throughput resolution.
     */
    private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) {
        checkNotNull(cancellationToken, "Cancellation token can not be null");

        if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) {
            return Flux.empty();
        }

        return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL)
            .flatMap(t -> this.resolveContainerMaxThroughput())
            .flatMapIterable(controller -> this.groupControllers.values())
            .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get()))
            .onErrorResume(throwable -> {
                if (this.isOfferNotConfiguredException(throwable)) {
                    // NOTE(review): close() returns a Mono that is never subscribed here,
                    // so this call has no effect — confirm the intended shutdown behavior.
                    this.close();
                }
                // NOTE(review): SLF4J uses '{}' placeholders; '%s' is not interpolated.
                logger.warn("Refresh throughput failed with reason %s", throwable);
                return Mono.empty();
            })
            .then()
            .repeat(() -> !cancellationToken.isCancellationRequested());
    }

    // Cancels the refresh task and closes every group controller.
    @Override
    public Mono<Void> close() {
        this.cancellationTokenSource.cancel();
        return Flux.fromIterable(this.groupControllers.values())
            .flatMap(groupController -> groupController.close())
            .then();
    }
}
/**
 * Container-level throughput control controller.
 * Resolves the rids and (when any group uses a relative threshold) the max provisioned
 * throughput of the target container or its database, lazily creates one controller per
 * throughput control group through an AsyncCache, and routes each request to the
 * matching group controller.
 */
class ThroughputContainerController implements IThroughputContainerController {
    private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class);
    // Interval at which the background task re-reads the container max throughput.
    private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15);
    // Status/sub-status pair that identifies "no offer configured for this resource".
    private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST;
    private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN;

    private final AsyncDocumentClient client;
    private final ConnectionMode connectionMode;
    private final GlobalEndpointManager globalEndpointManager;
    // Group name -> controller; getAsync() deduplicates concurrent creation.
    private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache;
    private final Set<ThroughputControlGroup> groups;
    private final AtomicReference<Integer> maxContainerThroughput;
    private final RxPartitionKeyRangeCache partitionKeyRangeCache;
    private final CosmosAsyncContainer targetContainer;
    private final CancellationTokenSource cancellationTokenSource;

    // Set in createAndInitializeGroupController() when a group is flagged as default.
    private ThroughputGroupControllerBase defaultGroupController;
    private String targetContainerRid;
    private String targetDatabaseRid;
    private ThroughputResolveLevel throughputResolveLevel;

    public ThroughputContainerController(
        ConnectionMode connectionMode,
        GlobalEndpointManager globalEndpointManager,
        Set<ThroughputControlGroup> groups,
        RxPartitionKeyRangeCache partitionKeyRangeCache) {

        checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null");
        checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty");
        checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null");

        this.connectionMode = connectionMode;
        this.globalEndpointManager = globalEndpointManager;
        this.groupControllerCache = new AsyncCache<>();
        this.groups = groups;
        this.maxContainerThroughput = new AtomicReference<>();
        this.partitionKeyRangeCache = partitionKeyRangeCache;
        // An arbitrary group's container is used as the target — presumably all groups
        // share one target container; TODO confirm this invariant upstream.
        this.targetContainer = BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next());
        this.client = CosmosBridgeInternal.getContextClient(this.targetContainer);
        this.throughputResolveLevel = this.getThroughputResolveLevel(groups);
        this.cancellationTokenSource = new CancellationTokenSource();
    }

    // Throughput only needs to be resolved when some group uses a relative threshold.
    private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) {
        if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) {
            return ThroughputResolveLevel.CONTAINER;
        } else {
            return ThroughputResolveLevel.NONE;
        }
    }

    /**
     * Resolves database/container rids and the max throughput, creates and initializes
     * the group controllers, and starts the background refresh task on the parallel
     * scheduler. Returns this controller.
     */
    @Override
    @SuppressWarnings("unchecked")
    public <T> Mono<T> init() {
        return this.resolveDatabaseResourceId()
            .flatMap(controller -> this.resolveContainerResourceId())
            .flatMap(controller -> this.resolveContainerMaxThroughput())
            .flatMap(controller -> this.createAndInitializeGroupControllers())
            .doOnSuccess(controller -> {
                Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe());
            })
            .thenReturn((T) this);
    }

    // Caches the database rid for the database-level offer lookup fallback.
    private Mono<ThroughputContainerController> resolveDatabaseResourceId() {
        return this.targetContainer.getDatabase().read()
            .flatMap(response -> {
                this.targetDatabaseRid = response.getProperties().getResourceId();
                return Mono.just(this);
            });
    }

    // Caches the container rid, also used by canHandleRequest().
    private Mono<ThroughputContainerController> resolveContainerResourceId() {
        return this.targetContainer.read()
            .flatMap(response -> {
                this.targetContainerRid = response.getProperties().getResourceId();
                return Mono.just(this);
            });
    }

    /**
     * Resolves the container (or, as fallback, database) max throughput and stores it in
     * {@code maxContainerThroughput}.
     * NOTE(review): Mono.just captures throughputResolveLevel at assembly time, so the
     * retryWhen resubscription below replays the stale value and the CONTAINER <-> DATABASE
     * flip done in the error handlers may never take effect — consider Mono.defer here.
     */
    private Mono<ThroughputContainerController> resolveContainerMaxThroughput() {
        return Mono.just(this.throughputResolveLevel)
            .flatMap(throughputResolveLevel -> {
                if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) {
                    return this.resolveThroughputByResourceId(this.targetContainerRid)
                        .onErrorResume(throwable -> {
                            if (this.isOfferNotConfiguredException(throwable)) {
                                this.throughputResolveLevel = ThroughputResolveLevel.DATABASE;
                            }
                            return Mono.error(throwable);
                        });
                } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) {
                    return this.resolveThroughputByResourceId(this.targetDatabaseRid)
                        .onErrorResume(throwable -> {
                            if (this.isOfferNotConfiguredException(throwable)) {
                                this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER;
                            }
                            return Mono.error(throwable);
                        });
                }
                return Mono.empty();
            })
            .flatMap(throughputResponse -> {
                this.updateMaxContainerThroughput(throughputResponse);
                return Mono.empty();
            })
            .retryWhen(
                RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable))
            ).thenReturn(this);
    }

    // Max of autoscale max throughput and manual throughput from the offer response.
    private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) {
        checkNotNull(throughputResponse, "Throughput response can not be null");

        ThroughputProperties throughputProperties = throughputResponse.getProperties();
        this.maxContainerThroughput.set(
            Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput()));
    }

    // True when the (possibly reactor-wrapped) error carries the "no offer" status pair.
    private boolean isOfferNotConfiguredException(Throwable throwable) {
        checkNotNull(throwable, "Throwable should not be null");

        CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class);
        return cosmosException != null
            && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
            && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
    }

    /**
     * Routes the request to the controller of its named group; falls back to the default
     * group controller for an unnamed or unknown group, and to the original request mono
     * when no default exists.
     */
    @Override
    public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) {
        checkNotNull(request, "Request can not be null");
        checkNotNull(originalRequestMono, "Original request mono can not be null");

        return Mono.just(request)
            .flatMap(request1 -> {
                if (request1.getThroughputControlGroupName() == null) {
                    return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController));
                } else {
                    // NOTE(review): reads the outer 'request' instead of 'request1' —
                    // same object (Mono.just above), but inconsistent naming.
                    return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName())
                        .defaultIfEmpty(this.defaultGroupController)
                        .map(Utils.ValueHolder::new);
                }
            })
            .flatMap(groupController -> {
                if (groupController.v != null) {
                    return groupController.v.processRequest(request, originalRequestMono);
                }
                return originalRequestMono;
            });
    }

    // Empty when the name is blank or matches no configured group.
    private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) {
        if (StringUtils.isEmpty(groupName)) {
            return Mono.empty();
        }

        ThroughputControlGroup group =
            this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null);
        if (group == null) {
            return Mono.empty();
        }

        return this.resolveThroughputGroupController(group);
    }

    public String getTargetContainerRid() {
        return this.targetContainerRid;
    }

    // This controller only handles requests resolved to its own container rid.
    @Override
    public boolean canHandleRequest(RxDocumentServiceRequest request) {
        checkNotNull(request, "Request can not be null");
        return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid);
    }

    // Warms the cache: resolves (creating + initializing if needed) every group controller.
    private Mono<ThroughputContainerController> createAndInitializeGroupControllers() {
        return Flux.fromIterable(this.groups)
            .flatMap(group -> this.resolveThroughputGroupController(group))
            .then(Mono.just(this));
    }

    // Cache lookup keyed by group name; creation is performed at most once per key.
    private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) {
        return this.groupControllerCache.getAsync(
            group.getGroupName(),
            null,
            () -> this.createAndInitializeGroupController(group));
    }

    // Builds and initializes a group controller; records it as default when flagged.
    private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) {
        ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController(
            this.connectionMode,
            this.globalEndpointManager,
            group,
            this.maxContainerThroughput.get(),
            this.partitionKeyRangeCache,
            this.targetContainerRid);

        return groupController
            .init()
            .cast(ThroughputGroupControllerBase.class)
            .doOnSuccess(controller -> {
                if (controller.isDefault()) {
                    this.defaultGroupController = controller;
                }
            });
    }

    /**
     * Background loop: every DEFAULT_THROUGHPUT_REFRESH_INTERVAL re-resolves the max
     * throughput and pushes it to every group controller, until the token is cancelled.
     * No-op when no group needs throughput resolution.
     */
    private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) {
        checkNotNull(cancellationToken, "Cancellation token can not be null");

        if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) {
            return Flux.empty();
        }

        return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL)
            .flatMap(t -> this.resolveContainerMaxThroughput())
            .flatMapIterable(controller -> this.groups)
            .flatMap(group -> this.resolveThroughputGroupController(group))
            .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get()))
            .onErrorResume(throwable -> {
                // NOTE(review): SLF4J uses '{}' placeholders; '%s' is not interpolated.
                logger.warn("Refresh throughput failed with reason %s", throwable);
                return Mono.empty();
            })
            .then()
            .repeat(() -> !cancellationToken.isCancellationRequested());
    }

    // Cancels the refresh task and closes every group controller.
    @Override
    public Mono<Void> close() {
        this.cancellationTokenSource.cancel();
        return Flux.fromIterable(this.groups)
            .flatMap(group -> this.resolveThroughputGroupController(group))
            .flatMap(groupController -> groupController.close())
            .then();
    }
}
Mono.defer() will run this operation separately for each subscriber. I am not trying to confuse you here :) — just a heads-up that Mono.defer() re-runs the full sequence on every subscription, so make sure that is the behavior you want.
/**
 * Resolves the max throughput of the target container (or, as fallback, its database)
 * and stores it in {@code maxContainerThroughput}. On a "no offer configured" error the
 * resolve level is flipped CONTAINER <-> DATABASE inside the error handlers, and the
 * retryWhen at the bottom resubscribes the chain once for that error class.
 *
 * @return a Mono emitting this controller once the throughput has been resolved.
 */
private Mono<ThroughputContainerController> resolveContainerMaxThroughput() {
    // Mono.defer is deliberate: each (re)subscription — including the retryWhen
    // retry — must re-read throughputResolveLevel to observe the flip above.
    return Mono.defer(() -> Mono.just(this.throughputResolveLevel))
        .flatMap(throughputResolveLevel -> {
            if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) {
                return this.resolveThroughputByResourceId(this.targetContainerRid)
                    .onErrorResume(throwable -> {
                        if (this.isOfferNotConfiguredException(throwable)) {
                            // No container-level offer: fall back to the database offer.
                            this.throughputResolveLevel = ThroughputResolveLevel.DATABASE;
                        }
                        return Mono.error(throwable);
                    });
            } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) {
                return this.resolveThroughputByResourceId(this.targetDatabaseRid)
                    .onErrorResume(throwable -> {
                        if (this.isOfferNotConfiguredException(throwable)) {
                            // No database-level offer either: flip back for the next attempt.
                            this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER;
                        }
                        return Mono.error(throwable);
                    });
            }
            // ThroughputResolveLevel.NONE: nothing to resolve.
            return Mono.empty();
        })
        .flatMap(throughputResponse -> {
            this.updateMaxContainerThroughput(throughputResponse);
            return Mono.empty();
        })
        .retryWhen(
            // Retry exactly once, and only for the "offer not configured" error.
            RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable))
        ).thenReturn(this);
}
return Mono.defer(() -> Mono.just(this.throughputResolveLevel))
/**
 * Resolves the max throughput of the target container (or, as fallback, its database)
 * and stores it in {@code maxContainerThroughput}. On a "no offer configured" error the
 * resolve level is flipped CONTAINER <-> DATABASE inside the error handlers, and the
 * retryWhen at the bottom resubscribes the chain once for that error class.
 * <p>
 * Fix: {@code Mono.just(this.throughputResolveLevel)} captured the level when the Mono
 * was assembled, so the retryWhen resubscription replayed the stale value and the
 * CONTAINER <-> DATABASE fallback never took effect. {@code Mono.defer} re-reads the
 * field on every (re)subscription, restoring the fallback behavior.
 *
 * @return a Mono emitting this controller once the throughput has been resolved.
 */
private Mono<ThroughputContainerController> resolveContainerMaxThroughput() {
    // defer so the retry resubscription observes the flipped throughputResolveLevel.
    return Mono.defer(() -> Mono.just(this.throughputResolveLevel))
        .flatMap(throughputResolveLevel -> {
            if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) {
                return this.resolveThroughputByResourceId(this.targetContainerRid)
                    .onErrorResume(throwable -> {
                        if (this.isOfferNotConfiguredException(throwable)) {
                            // No container-level offer: fall back to the database offer.
                            this.throughputResolveLevel = ThroughputResolveLevel.DATABASE;
                        }
                        return Mono.error(throwable);
                    });
            } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) {
                return this.resolveThroughputByResourceId(this.targetDatabaseRid)
                    .onErrorResume(throwable -> {
                        if (this.isOfferNotConfiguredException(throwable)) {
                            // No database-level offer either: flip back for the next attempt.
                            this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER;
                        }
                        return Mono.error(throwable);
                    });
            }
            // ThroughputResolveLevel.NONE: nothing to resolve.
            return Mono.empty();
        })
        .flatMap(throughputResponse -> {
            this.updateMaxContainerThroughput(throughputResponse);
            return Mono.empty();
        })
        .retryWhen(
            // Retry exactly once, and only for the "offer not configured" error.
            RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable))
        ).thenReturn(this);
}
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers 
found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return 
Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { 
this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( 
BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup 
group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) 
.doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
`queryOffers()` returns a `Flux<FeedResponse>`, so calling `single()` on this may fail if there is more than one value in the underlying `Flux`. Make sure either you are only expecting a single value, or, if not, just use `flatMap` directly.
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); }
.single()
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { 
checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> 
groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController 
-> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token 
can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
No sysouts :P Please remove it.
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); }
System.out.println(throwable.getCause());
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { 
checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> 
groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController 
-> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token 
can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
Also, I don't think there is need for `onErrorResume()`
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); }
System.out.println(throwable.getCause());
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { 
checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> 
groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController 
-> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token 
can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
Please unwrap the exception here (e.g. `Exceptions.unwrap(throwable)`) before casting to `CosmosException`, as mentioned in one of the earlier comments.
/**
 * Returns whether the given error is the "no offer configured" Cosmos error, i.e. a
 * {@link CosmosException} whose status/sub-status match {@code NO_OFFER_EXCEPTION_STATUS_CODE}
 * and {@code NO_OFFER_EXCEPTION_SUB_STATUS_CODE}.
 *
 * @param throwable the error to inspect; must not be null.
 * @return true if the (unwrapped) error is the offer-not-configured exception.
 */
private boolean isOfferNotConfiguredException(Throwable throwable) {
    checkNotNull(throwable, "Throwable should not be null");
    // Reactor can deliver the original exception wrapped (e.g. composite/onError wrappers),
    // so unwrap it first; casting the raw throwable would miss a wrapped CosmosException.
    CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class);
    return cosmosException != null
        && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
        && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
}
CosmosException cosmosException = Utils.as(throwable, CosmosException.class);
/**
 * Checks whether {@code throwable} represents the "offer not configured" condition:
 * a {@link CosmosException} carrying {@code NO_OFFER_EXCEPTION_STATUS_CODE} and
 * {@code NO_OFFER_EXCEPTION_SUB_STATUS_CODE}.
 *
 * @param throwable the error to classify; must not be null.
 * @return true only for the offer-not-configured Cosmos exception.
 */
private boolean isOfferNotConfiguredException(Throwable throwable) {
    checkNotNull(throwable, "Throwable should not be null");
    // Unwrap any Reactor wrapper before attempting the cast.
    final Throwable unwrapped = Exceptions.unwrap(throwable);
    final CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
    if (cosmosException == null) {
        return false;
    }
    return cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
        && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
}
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response 
can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { 
checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if 
(request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, 
this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
This is the correct usage of `onErrorResume()` :) On error here, you resume the reactive stream with an empty Mono instead of propagating the failure.
/**
 * Background task that periodically re-resolves the container's max throughput and pushes the
 * refreshed value to every group controller. Runs until the cancellation token is cancelled.
 * No-op (empty Flux) when the resolve level is NONE, since nothing needs refreshing.
 *
 * @param cancellationToken token used to stop the repeat loop; must not be null.
 * @return a Flux that completes each refresh cycle and repeats until cancelled.
 */
private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) {
    checkNotNull(cancellationToken, "Cancellation token can not be null");
    if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) {
        return Flux.empty();
    }
    return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL)
        .flatMap(t -> this.resolveContainerMaxThroughput())
        .flatMapIterable(controller -> this.groupControllers.values())
        .doOnNext(groupController ->
            groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get()))
        .onErrorResume(throwable -> {
            // Bug fix: SLF4J uses "{}" placeholders, not printf-style "%s" — the previous
            // message logged a literal "%s". Pass the throwable as the exception argument
            // so its stack trace is recorded, and swallow the error so the loop keeps running.
            logger.warn("Refresh throughput failed", throwable);
            return Mono.empty();
        })
        .then()
        .repeat(() -> !cancellationToken.isCancellationRequested());
}
.onErrorResume(throwable -> {
/**
 * Background task that periodically re-resolves the container's max throughput and notifies
 * each group controller (resolved through the controller cache) of the new value. Repeats
 * until the cancellation token is cancelled; returns an empty Flux when the resolve level
 * is NONE because no refresh is required.
 *
 * @param cancellationToken token used to stop the repeat loop; must not be null.
 * @return a Flux that completes each refresh cycle and repeats until cancelled.
 */
private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) {
    checkNotNull(cancellationToken, "Cancellation token can not be null");
    if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) {
        return Flux.empty();
    }
    return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL)
        .flatMap(t -> this.resolveContainerMaxThroughput())
        .flatMapIterable(controller -> this.groups)
        .flatMap(group -> this.resolveThroughputGroupController(group))
        .doOnNext(groupController ->
            groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get()))
        .onErrorResume(throwable -> {
            // Bug fix: SLF4J parameterized logging uses "{}", not "%s" — the old format
            // string was never substituted. Log the throwable as the exception argument
            // (stack trace included) and resume with empty so the refresh loop survives.
            logger.warn("Refresh throughput failed", throwable);
            return Mono.empty();
        })
        .then()
        .repeat(() -> !cancellationToken.isCancellationRequested());
}
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response 
can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> 
ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == 
NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> 
this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
yup, the intent here is to recalculate the throughput level in case it changed. Will discuss offline.
private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); }
return Mono.defer(() -> Mono.just(this.throughputResolveLevel))
private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers 
found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return 
Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { 
this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( 
BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup 
group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) 
.doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
yup, will only expect one value here, and we are using same in the internal implementation of readThroughput
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); }
.single()
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { 
checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> 
groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController 
-> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token 
can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
Oh sorry, I left it by accident, I was debugging some part of the code before, will remove in the next iteration
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); }
System.out.println(throwable.getCause());
private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); }
/**
 * Container-level throughput controller.
 *
 * Resolves the maximum throughput provisioned for the target container (falling back to the
 * owning database when throughput is shared), keeps that value refreshed in the background,
 * and dispatches each request to the matching per-group throughput controller.
 *
 * Thread-safety: {@code groupControllers} is a ConcurrentHashMap and
 * {@code maxContainerThroughput} an AtomicReference; the RID fields and the resolve level are
 * written during {@code init()} / the refresh task — NOTE(review): those writes are not
 * synchronized, presumably relying on the reactive pipeline's ordering; confirm.
 */
class ThroughputContainerController implements IThroughputContainerController {
    private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class);

    // Cadence of the background offer refresh.
    private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15);
    // Status/sub-status used to signal "no offer configured on this resource".
    private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST;
    private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN;

    private final AsyncDocumentClient client;
    private final ConnectionMode connectionMode;
    private final GlobalEndpointManager globalEndpointManager;
    private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers;
    private final List<ThroughputControlGroup> groups;
    private final AtomicReference<Integer> maxContainerThroughput;
    private final RxPartitionKeyRangeCache partitionKeyRangeCache;
    private final CosmosAsyncContainer targetContainer;
    private final CancellationTokenSource cancellationTokenSource;
    private final Scheduler scheduler;

    private ThroughputGroupControllerBase defaultGroupController;
    private String targetContainerRid;
    private String targetDatabaseRid;
    private ThroughputResolveLevel throughputResolveLevel;

    /**
     * @param connectionMode        connection mode of the owning client.
     * @param globalEndpointManager endpoint manager; must not be null.
     * @param groups                non-empty list of throughput control groups; all groups are
     *                              assumed to target the same container (the first one is used).
     * @param partitionKeyRangeCache partition key range cache; must not be null.
     */
    public ThroughputContainerController(
        ConnectionMode connectionMode,
        GlobalEndpointManager globalEndpointManager,
        List<ThroughputControlGroup> groups,
        RxPartitionKeyRangeCache partitionKeyRangeCache) {

        checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null");
        checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty");
        checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null");

        this.connectionMode = connectionMode;
        this.globalEndpointManager = globalEndpointManager;
        this.groupControllers = new ConcurrentHashMap<>();
        this.groups = groups;
        this.maxContainerThroughput = new AtomicReference<>();
        this.partitionKeyRangeCache = partitionKeyRangeCache;
        this.targetContainer = groups.get(0).getTargetContainer();
        this.client = CosmosBridgeInternal.getContextClient(this.targetContainer);
        this.throughputResolveLevel = this.getThroughputResolveLevel(groups);
        this.cancellationTokenSource = new CancellationTokenSource();
        // NOTE(review): Schedulers.elastic() is deprecated in newer Reactor; consider boundedElastic().
        this.scheduler = Schedulers.elastic();
    }

    /**
     * Only groups that configure a targetThroughputThreshold need the container's max
     * throughput; otherwise no offer resolution (and no refresh task) is required.
     */
    private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) {
        if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) {
            return ThroughputResolveLevel.CONTAINER;
        } else {
            return ThroughputResolveLevel.NONE;
        }
    }

    /**
     * Resolves database/container RIDs and max throughput, initializes the group controllers,
     * then schedules the background refresh task. Emits {@code this} on success.
     */
    @Override
    @SuppressWarnings("unchecked")
    public <T> Mono<T> init() {
        return this.resolveDatabaseResourceId()
            .flatMap(controller -> this.resolveContainerResourceId())
            .flatMap(controller -> this.resolveContainerMaxThroughput())
            .flatMap(controller -> this.createAndInitializeGroupControllers())
            .doOnSuccess(controller -> {
                this.setDefaultGroupController();
                scheduler.schedule(() ->
                    this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe());
            })
            .thenReturn((T) this);
    }

    private Mono<ThroughputContainerController> resolveDatabaseResourceId() {
        return this.targetContainer.getDatabase().read()
            .flatMap(response -> {
                this.targetDatabaseRid = response.getProperties().getResourceId();
                return Mono.just(this);
            });
    }

    private Mono<ThroughputContainerController> resolveContainerResourceId() {
        return this.targetContainer.read()
            .flatMap(response -> {
                this.targetContainerRid = response.getProperties().getResourceId();
                return Mono.just(this);
            });
    }

    /**
     * Resolves max throughput at the current level; when the resource has no offer configured,
     * flips CONTAINER &lt;-&gt; DATABASE and retries once. Mono.defer is essential here: it makes
     * retryWhen re-read {@code throughputResolveLevel} on resubscription.
     */
    private Mono<ThroughputContainerController> resolveContainerMaxThroughput() {
        return Mono.defer(() -> Mono.just(this.throughputResolveLevel))
            .flatMap(throughputResolveLevel -> {
                if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) {
                    return this.resolveThroughputByResourceId(this.targetContainerRid)
                        .onErrorResume(throwable -> {
                            if (this.isOfferNotConfiguredException(throwable)) {
                                this.throughputResolveLevel = ThroughputResolveLevel.DATABASE;
                            }
                            return Mono.error(throwable);
                        });
                } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) {
                    return this.resolveThroughputByResourceId(this.targetDatabaseRid)
                        .onErrorResume(throwable -> {
                            if (this.isOfferNotConfiguredException(throwable)) {
                                this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER;
                            }
                            return Mono.error(throwable);
                        });
                }
                return Mono.empty();
            })
            .flatMap(throughputResponse -> {
                this.updateMaxContainerThroughput(throughputResponse);
                return Mono.empty();
            })
            .retryWhen(
                RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable))
            ).thenReturn(this);
    }

    /**
     * Picks the (single) group marked as default. Throws IllegalArgumentException when more
     * than one group claims to be the default.
     */
    private void setDefaultGroupController() {
        List<ThroughputGroupControllerBase> defaultGroupControllers =
            this.groupControllers.values().stream()
                .filter(ThroughputGroupControllerBase::isUseByDefault)
                .collect(Collectors.toList());
        if (defaultGroupControllers.size() > 1) {
            throw new IllegalArgumentException("There should only be one default throughput control group");
        }
        if (defaultGroupControllers.size() == 1) {
            this.defaultGroupController = defaultGroupControllers.get(0);
        }
    }

    /** Records the larger of autoscale max and manual throughput as the container ceiling. */
    private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) {
        checkNotNull(throughputResponse, "Throughput response can not be null");
        ThroughputProperties throughputProperties = throughputResponse.getProperties();
        this.maxContainerThroughput.set(
            Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput()));
    }

    /**
     * True when the failure indicates that no offer is configured on the queried resource.
     * Unwraps Reactor composite/hooked exceptions first: retry operators may deliver the
     * CosmosException wrapped, and without unwrapping the CONTAINER/DATABASE fallback above
     * would never trigger.
     */
    private boolean isOfferNotConfiguredException(Throwable throwable) {
        checkNotNull(throwable, "Throwable should not be null");
        CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class);
        return cosmosException != null
            && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
            && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
    }

    /**
     * Routes the request to its named group controller (falling back to the default group when
     * the name is unknown or absent); requests with no applicable controller pass through.
     */
    @Override
    public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) {
        checkNotNull(request, "Request can not be null");
        checkNotNull(originalRequestMono, "Original request mono can not be null");

        return Mono.just(request)
            .flatMap(request1 -> {
                if (request1.getThroughputControlGroupName() == null) {
                    return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController));
                } else {
                    return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName()))
                        .defaultIfEmpty(this.defaultGroupController)
                        .map(Utils.ValueHolder::new);
                }
            })
            .flatMap(groupController -> {
                if (groupController.v != null) {
                    return groupController.v.processRequest(request, originalRequestMono);
                }
                return originalRequestMono;
            });
    }

    public String getTargetContainerRid() {
        return this.targetContainerRid;
    }

    /** A request is handled here iff it resolves to this controller's container RID. */
    @Override
    public boolean canHandleRequest(RxDocumentServiceRequest request) {
        checkNotNull(request, "Request can not be null");
        return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid);
    }

    /** Creates (idempotently, keyed by group name) and initializes one controller per group. */
    private Mono<ThroughputContainerController> createAndInitializeGroupControllers() {
        return Flux.fromIterable(this.groups)
            .flatMap(group -> {
                ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent(
                    group.getGroupName(),
                    groupName -> ThroughputGroupControllerFactory.createController(
                        this.connectionMode,
                        this.globalEndpointManager,
                        group,
                        this.maxContainerThroughput.get(),
                        this.partitionKeyRangeCache,
                        this.targetContainerRid));
                return Mono.just(groupController);
            })
            .flatMap(groupController -> groupController.init())
            .then(Mono.just(this));
    }

    /**
     * Periodically re-reads the container max throughput and notifies all group controllers,
     * until the cancellation token fires. Errors are logged and swallowed so one failed
     * refresh does not stop the loop.
     */
    private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) {
        checkNotNull(cancellationToken, "Cancellation token can not be null");

        if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) {
            return Flux.empty();
        }

        return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL)
            .flatMap(t -> this.resolveContainerMaxThroughput())
            .flatMapIterable(controller -> this.groupControllers.values())
            .doOnNext(groupController ->
                groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get()))
            .onErrorResume(throwable -> {
                // SLF4J: pass the throwable as the last argument (not via a "%s" printf
                // placeholder, which SLF4J does not interpret) so the stack trace is logged.
                logger.warn("Refresh throughput failed", throwable);
                return Mono.empty();
            })
            .then()
            .repeat(() -> !cancellationToken.isCancellationRequested());
    }

    /** Cancels the refresh task and closes every group controller. */
    @Override
    public Mono<Void> close() {
        this.cancellationTokenSource.cancel();
        return Flux.fromIterable(this.groupControllers.values())
            .flatMap(groupController -> groupController.close())
            .then();
    }
}
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController 
-> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token 
can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
Acknowledged — I will add it in the next iteration.
/**
 * Returns true when the failure indicates that no offer (throughput) is configured on the
 * queried resource, i.e. the status/sub-status pair matches NO_OFFER_EXCEPTION_*.
 *
 * The throwable is first passed through {@code Exceptions.unwrap}: Reactor operators (retry,
 * composite errors, value-carrying hooks) can deliver the CosmosException wrapped, and
 * casting the raw throwable directly would miss it — which would silently disable the
 * container/database resolve-level fallback that relies on this predicate.
 *
 * @param throwable failure to classify; must not be null.
 */
private boolean isOfferNotConfiguredException(Throwable throwable) {
    checkNotNull(throwable, "Throwable should not be null");
    CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class);
    return cosmosException != null
        && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
        && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
}
CosmosException cosmosException = Utils.as(throwable, CosmosException.class); // NOTE(review): throwable may arrive wrapped by Reactor (retry/composite); unwrap it before the cast or a wrapped CosmosException will not be recognized
/**
 * Classifies a failure as "no offer configured on the resource".
 *
 * Reactor may hand us the CosmosException wrapped (retry machinery, composite errors), so the
 * throwable is unwrapped before the cast. The match requires both the status code and the
 * sub-status code to equal the NO_OFFER_EXCEPTION_* constants.
 *
 * @param throwable failure to classify; must not be null.
 * @return true iff the (unwrapped) failure is the no-offer CosmosException.
 */
private boolean isOfferNotConfiguredException(Throwable throwable) {
    checkNotNull(throwable, "Throwable should not be null");

    CosmosException cause = Utils.as(Exceptions.unwrap(throwable), CosmosException.class);
    if (cause == null) {
        return false;
    }

    return cause.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE
        && cause.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE;
}
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response 
can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { 
checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if 
(request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, 
this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
yup yup, that is the intent here
private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groupControllers.values()) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); }
.onErrorResume(throwable -> {
private Flux<Void> refreshContainerMaxThroughputTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); if (this.throughputResolveLevel == ThroughputResolveLevel.NONE) { return Flux.empty(); } return Mono.delay(DEFAULT_THROUGHPUT_REFRESH_INTERVAL) .flatMap(t -> this.resolveContainerMaxThroughput()) .flatMapIterable(controller -> this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .doOnNext(groupController -> groupController.onContainerMaxThroughputRefresh(this.maxContainerThroughput.get())) .onErrorResume(throwable -> { logger.warn("Refresh throughput failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ConcurrentHashMap<String, ThroughputGroupControllerBase> groupControllers; private final List<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private final Scheduler scheduler; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, List<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllers = new ConcurrentHashMap<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
groups.get(0).getTargetContainer(); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); this.scheduler = Schedulers.elastic(); } private ThroughputResolveLevel getThroughputResolveLevel(List<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { this.setDefaultGroupController(); scheduler.schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.defer(() -> Mono.just(this.throughputResolveLevel)) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = 
ThroughputResolveLevel.DATABASE; } return Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers(resourceId, new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .onErrorResume(throwable -> { System.out.println(throwable.getCause()); return Mono.error(throwable); }) .map(ModelBridgeInternal::createThroughputRespose); } private void setDefaultGroupController() { List<ThroughputGroupControllerBase> defaultGroupControllers = this.groupControllers.values().stream().filter(ThroughputGroupControllerBase::isUseByDefault).collect(Collectors.toList()); if (defaultGroupControllers.size() > 1) { throw new IllegalArgumentException("There should only be one default throughput control group"); } if (defaultGroupControllers.size() == 1) { this.defaultGroupController = defaultGroupControllers.get(0); } } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response 
can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(throwable, CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return Mono.justOrEmpty(this.groupControllers.get(request1.getThroughputControlGroupName())) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> { ThroughputGroupControllerBase groupController = this.groupControllers.computeIfAbsent( group.getGroupName(), groupName -> 
ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid)); return Mono.just(groupController); }) .flatMap(groupController -> groupController.init()) .then(Mono.just(this)); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groupControllers.values()) .flatMap(groupController -> groupController.close()) .then(); } }
class ThroughputContainerController implements IThroughputContainerController { private static final Logger logger = LoggerFactory.getLogger(ThroughputContainerController.class); private static final Duration DEFAULT_THROUGHPUT_REFRESH_INTERVAL = Duration.ofMinutes(15); private static final int NO_OFFER_EXCEPTION_STATUS_CODE = HttpConstants.StatusCodes.BADREQUEST; private static final int NO_OFFER_EXCEPTION_SUB_STATUS_CODE = HttpConstants.SubStatusCodes.UNKNOWN; private final AsyncDocumentClient client; private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final AsyncCache<String, ThroughputGroupControllerBase> groupControllerCache; private final Set<ThroughputControlGroup> groups; private final AtomicReference<Integer> maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final CosmosAsyncContainer targetContainer; private final CancellationTokenSource cancellationTokenSource; private ThroughputGroupControllerBase defaultGroupController; private String targetContainerRid; private String targetDatabaseRid; private ThroughputResolveLevel throughputResolveLevel; public ThroughputContainerController( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, Set<ThroughputControlGroup> groups, RxPartitionKeyRangeCache partitionKeyRangeCache) { checkNotNull(globalEndpointManager, "GlobalEndpointManager can not be null"); checkArgument(groups != null && groups.size() > 0, "Throughput budget groups can not be null or empty"); checkNotNull(partitionKeyRangeCache, "RxPartitionKeyRangeCache can not be null"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.groupControllerCache = new AsyncCache<>(); this.groups = groups; this.maxContainerThroughput = new AtomicReference<>(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.targetContainer = 
BridgeInternal.getTargetContainerFromThroughputControlGroup(groups.iterator().next()); this.client = CosmosBridgeInternal.getContextClient(this.targetContainer); this.throughputResolveLevel = this.getThroughputResolveLevel(groups); this.cancellationTokenSource = new CancellationTokenSource(); } private ThroughputResolveLevel getThroughputResolveLevel(Set<ThroughputControlGroup> groupConfigs) { if (groupConfigs.stream().anyMatch(groupConfig -> groupConfig.getTargetThroughputThreshold() != null)) { return ThroughputResolveLevel.CONTAINER; } else { return ThroughputResolveLevel.NONE; } } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveDatabaseResourceId() .flatMap(controller -> this.resolveContainerResourceId()) .flatMap(controller -> this.resolveContainerMaxThroughput()) .flatMap(controller -> this.createAndInitializeGroupControllers()) .doOnSuccess(controller -> { Schedulers.parallel().schedule(() -> this.refreshContainerMaxThroughputTask(this.cancellationTokenSource.getToken()).subscribe()); }) .thenReturn((T) this); } private Mono<ThroughputContainerController> resolveDatabaseResourceId() { return this.targetContainer.getDatabase().read() .flatMap(response -> { this.targetDatabaseRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerResourceId() { return this.targetContainer.read() .flatMap(response -> { this.targetContainerRid = response.getProperties().getResourceId(); return Mono.just(this); }); } private Mono<ThroughputContainerController> resolveContainerMaxThroughput() { return Mono.just(this.throughputResolveLevel) .flatMap(throughputResolveLevel -> { if (throughputResolveLevel == ThroughputResolveLevel.CONTAINER) { return this.resolveThroughputByResourceId(this.targetContainerRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.DATABASE; } return 
Mono.error(throwable); }); } else if (throughputResolveLevel == ThroughputResolveLevel.DATABASE) { return this.resolveThroughputByResourceId(this.targetDatabaseRid) .onErrorResume(throwable -> { if (this.isOfferNotConfiguredException(throwable)) { this.throughputResolveLevel = ThroughputResolveLevel.CONTAINER; } return Mono.error(throwable); }); } return Mono.empty(); }) .flatMap(throughputResponse -> { this.updateMaxContainerThroughput(throughputResponse); return Mono.empty(); }) .retryWhen( RetrySpec.max(1).filter(throwable -> this.isOfferNotConfiguredException(throwable)) ).thenReturn(this); } private Mono<ThroughputResponse> resolveThroughputByResourceId(String resourceId) { checkArgument(StringUtils.isNotEmpty(resourceId), "ResourceId can not be null or empty"); return this.client.queryOffers( BridgeInternal.getOfferQuerySpecFromResourceId(this.targetContainer, resourceId), new CosmosQueryRequestOptions()) .single() .flatMap(offerFeedResponse -> { if (offerFeedResponse.getResults().isEmpty()) { return Mono.error( BridgeInternal.createCosmosException(NO_OFFER_EXCEPTION_STATUS_CODE, "No offers found for the resource " + resourceId)); } return this.client.readOffer(offerFeedResponse.getResults().get(0).getSelfLink()).single(); }) .map(ModelBridgeInternal::createThroughputRespose); } private void updateMaxContainerThroughput(ThroughputResponse throughputResponse) { checkNotNull(throughputResponse, "Throughput response can not be null"); ThroughputProperties throughputProperties = throughputResponse.getProperties(); this.maxContainerThroughput.set( Math.max(throughputProperties.getAutoscaleMaxThroughput(), throughputProperties.getManualThroughput())); } private boolean isOfferNotConfiguredException(Throwable throwable) { checkNotNull(throwable, "Throwable should not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); return cosmosException != null && cosmosException.getStatusCode() == 
NO_OFFER_EXCEPTION_STATUS_CODE && cosmosException.getSubStatusCode() == NO_OFFER_EXCEPTION_SUB_STATUS_CODE; } @Override public <T> Mono<T> processRequest(RxDocumentServiceRequest request, Mono<T> originalRequestMono) { checkNotNull(request, "Request can not be null"); checkNotNull(originalRequestMono, "Original request mono can not be null"); return Mono.just(request) .flatMap(request1 -> { if (request1.getThroughputControlGroupName() == null) { return Mono.just(new Utils.ValueHolder<>(this.defaultGroupController)); } else { return this.getOrCreateThroughputGroupController(request.getThroughputControlGroupName()) .defaultIfEmpty(this.defaultGroupController) .map(Utils.ValueHolder::new); } }) .flatMap(groupController -> { if (groupController.v != null) { return groupController.v.processRequest(request, originalRequestMono); } return originalRequestMono; }); } private Mono<ThroughputGroupControllerBase> getOrCreateThroughputGroupController(String groupName) { if (StringUtils.isEmpty(groupName)) { return Mono.empty(); } ThroughputControlGroup group = this.groups.stream().filter(groupConfig -> StringUtils.equals(groupName, groupConfig.getGroupName())).findFirst().orElse(null); if (group == null) { return Mono.empty(); } return this.resolveThroughputGroupController(group); } public String getTargetContainerRid() { return this.targetContainerRid; } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { checkNotNull(request, "Request can not be null"); return StringUtils.equals(this.targetContainerRid, request.requestContext.resolvedCollectionRid); } private Mono<ThroughputContainerController> createAndInitializeGroupControllers() { return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .then(Mono.just(this)); } private Mono<ThroughputGroupControllerBase> resolveThroughputGroupController(ThroughputControlGroup group) { return this.groupControllerCache.getAsync( group.getGroupName(), null, () -> 
this.createAndInitializeGroupController(group)); } private Mono<ThroughputGroupControllerBase> createAndInitializeGroupController(ThroughputControlGroup group) { ThroughputGroupControllerBase groupController = ThroughputGroupControllerFactory.createController( this.connectionMode, this.globalEndpointManager, group, this.maxContainerThroughput.get(), this.partitionKeyRangeCache, this.targetContainerRid); return groupController .init() .cast(ThroughputGroupControllerBase.class) .doOnSuccess(controller -> { if (controller.isDefault()) { this.defaultGroupController = controller; } }); } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return Flux.fromIterable(this.groups) .flatMap(group -> this.resolveThroughputGroupController(group)) .flatMap(groupController -> groupController.close()) .then(); } }
```suggestion logger.warn("Reset throughput usage failed", throwable); ``` in general you should use slf4j place holder "{}" instead of "%s". for exceptions slfj4 has special support if they come as the last arg. no need for any place holder
private Flux<Void> throughputUsageCycleRenewTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); return Mono.delay(DEFAULT_THROUGHPUT_USAGE_RESET_DURATION) .flatMap(t -> this.resolveRequestController()) .doOnSuccess(requestController -> requestController.renewThroughputUsageCycle(this.groupThroughput.get())) .onErrorResume(throwable -> { logger.warn("Reset throughput usage failed with reason %s", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); }
logger.warn("Reset throughput usage failed with reason %s", throwable);
private Flux<Void> throughputUsageCycleRenewTask(CancellationToken cancellationToken) { checkNotNull(cancellationToken, "Cancellation token can not be null"); return Mono.delay(DEFAULT_THROUGHPUT_USAGE_RESET_DURATION) .flatMap(t -> this.resolveRequestController()) .doOnSuccess(requestController -> requestController.renewThroughputUsageCycle(this.groupThroughput.get())) .onErrorResume(throwable -> { logger.warn("Reset throughput usage failed with reason", throwable); return Mono.empty(); }) .then() .repeat(() -> !cancellationToken.isCancellationRequested()); }
class ThroughputGroupControllerBase implements IThroughputController { private final static Logger logger = LoggerFactory.getLogger(ThroughputGroupControllerBase.class); private final Duration DEFAULT_THROUGHPUT_USAGE_RESET_DURATION = Duration.ofSeconds(1); private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ThroughputControlGroup group; private final AtomicReference<Double> groupThroughput; private final AtomicInteger maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final AsyncCache<String, IThroughputRequestController> requestControllerAsyncCache; private final String targetContainerRid; private final CancellationTokenSource cancellationTokenSource; public ThroughputGroupControllerBase( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, ThroughputControlGroup group, Integer maxContainerThroughput, RxPartitionKeyRangeCache partitionKeyRangeCache, String targetContainerRid) { checkNotNull(globalEndpointManager, "Global endpoint manager can not be null"); checkNotNull(group, "Throughput control group can not be null"); checkNotNull(partitionKeyRangeCache, "Partition key range cache can not be null or empty"); checkArgument(StringUtils.isNotEmpty(targetContainerRid), "Target container rid cannot be null nor empty"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.group = group; if (this.group.getTargetThroughputThreshold() != null) { checkNotNull(maxContainerThroughput, "Max container throughput can not be null when target throughput threshold defined"); this.maxContainerThroughput = new AtomicInteger(maxContainerThroughput); } else { this.maxContainerThroughput = null; } this.groupThroughput = new AtomicReference<>(); this.calculateGroupThroughput(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.requestControllerAsyncCache = new AsyncCache<>(); this.targetContainerRid = 
targetContainerRid; this.cancellationTokenSource = new CancellationTokenSource(); } private void calculateGroupThroughput() { double allocatedThroughput = Double.MAX_VALUE; if (this.group.getTargetThroughputThreshold() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.maxContainerThroughput.get() * this.group.getTargetThroughputThreshold()); } if (this.group.getTargetThroughput() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.group.getTargetThroughput()); } this.groupThroughput.set(allocatedThroughput); } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveRequestController() .doOnSuccess(dummy -> { this.throughputUsageCycleRenewTask(this.cancellationTokenSource.getToken()).subscribeOn(Schedulers.parallel()).subscribe(); }) .thenReturn((T)this); } private Mono<IThroughputRequestController> createAndInitializeRequestController() { IThroughputRequestController requestController; if (this.connectionMode == ConnectionMode.DIRECT) { requestController = new PkRangesThroughputRequestController( this.globalEndpointManager, this.partitionKeyRangeCache, this.targetContainerRid, this.groupThroughput.get()); } else if (this.connectionMode == ConnectionMode.GATEWAY) { requestController = new GlobalThroughputRequestController(this.globalEndpointManager, this.groupThroughput.get()); } else { throw new IllegalArgumentException(String.format("Connection mode %s is not supported")); } return requestController.init(); } public boolean isDefault() { return this.group.isDefault(); } public void onContainerMaxThroughputRefresh(int maxContainerThroughput) { if (this.maxContainerThroughput.getAndSet(maxContainerThroughput) != maxContainerThroughput) { this.calculateGroupThroughput(); } } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return this.resolveRequestController() .flatMap(requestController -> requestController.close()); } @Override public <T> Mono<T> 
processRequest(RxDocumentServiceRequest request, Mono<T> nextRequestMono) { return this.resolveRequestController() .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return Mono.just(requestController); } else { return this.shouldUpdateRequestController(request) .flatMap(shouldUpdate -> { if (shouldUpdate) { requestController.close().subscribeOn(Schedulers.parallel()).subscribe(); this.refreshRequestController(); return this.resolveRequestController(); } else { return Mono.just(requestController); } }); } }) .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return requestController.processRequest(request, nextRequestMono) .doOnError(throwable -> this.handleException(throwable)); } else { logger.warn( "Can not find request controller to handle request {} with pkRangeId {}", request.getActivityId(), request.requestContext.resolvedPartitionKeyRange.getId()); return nextRequestMono; } }); } private Mono<Boolean> shouldUpdateRequestController(RxDocumentServiceRequest request) { return this.partitionKeyRangeCache.tryGetRangeByPartitionKeyRangeId( null, request.requestContext.resolvedCollectionRid, request.requestContext.resolvedPartitionKeyRange.getId(), null) .map(pkRangeHolder -> pkRangeHolder.v) .flatMap(pkRange -> { if (pkRange == null) { return Mono.just(Boolean.FALSE); } else { return Mono.just(Boolean.TRUE); }}); } private Mono<IThroughputRequestController> resolveRequestController() { return this.requestControllerAsyncCache.getAsync( this.group.getGroupName(), null, () -> this.createAndInitializeRequestController()); } private void refreshRequestController() { this.requestControllerAsyncCache.refresh( this.group.getGroupName(), () -> this.createAndInitializeRequestController()); } private void handleException(Throwable throwable) { checkNotNull(throwable, "Throwable can not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if 
(isPartitionSplit(cosmosException) || isPartitionCompletingSplittingException(cosmosException)) { this.refreshRequestController(); } } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { return this.isDefault() || StringUtils.equals(this.group.getGroupName(), request.getThroughputControlGroupName()); } }
class ThroughputGroupControllerBase implements IThroughputController { private final static Logger logger = LoggerFactory.getLogger(ThroughputGroupControllerBase.class); private final Duration DEFAULT_THROUGHPUT_USAGE_RESET_DURATION = Duration.ofSeconds(1); private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ThroughputControlGroup group; private final AtomicReference<Double> groupThroughput; private final AtomicInteger maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final AsyncCache<String, IThroughputRequestController> requestControllerAsyncCache; private final String targetContainerRid; private final CancellationTokenSource cancellationTokenSource; public ThroughputGroupControllerBase( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, ThroughputControlGroup group, Integer maxContainerThroughput, RxPartitionKeyRangeCache partitionKeyRangeCache, String targetContainerRid) { checkNotNull(globalEndpointManager, "Global endpoint manager can not be null"); checkNotNull(group, "Throughput control group can not be null"); checkNotNull(partitionKeyRangeCache, "Partition key range cache can not be null or empty"); checkArgument(StringUtils.isNotEmpty(targetContainerRid), "Target container rid cannot be null nor empty"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.group = group; if (this.group.getTargetThroughputThreshold() != null) { checkNotNull(maxContainerThroughput, "Max container throughput can not be null when target throughput threshold defined"); this.maxContainerThroughput = new AtomicInteger(maxContainerThroughput); } else { this.maxContainerThroughput = null; } this.groupThroughput = new AtomicReference<>(); this.calculateGroupThroughput(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.requestControllerAsyncCache = new AsyncCache<>(); this.targetContainerRid = 
targetContainerRid; this.cancellationTokenSource = new CancellationTokenSource(); } private void calculateGroupThroughput() { double allocatedThroughput = Double.MAX_VALUE; if (this.group.getTargetThroughputThreshold() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.maxContainerThroughput.get() * this.group.getTargetThroughputThreshold()); } if (this.group.getTargetThroughput() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.group.getTargetThroughput()); } this.groupThroughput.set(allocatedThroughput); } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveRequestController() .doOnSuccess(dummy -> { this.throughputUsageCycleRenewTask(this.cancellationTokenSource.getToken()).subscribeOn(Schedulers.parallel()).subscribe(); }) .thenReturn((T)this); } private Mono<IThroughputRequestController> createAndInitializeRequestController() { IThroughputRequestController requestController; if (this.connectionMode == ConnectionMode.DIRECT) { requestController = new PkRangesThroughputRequestController( this.globalEndpointManager, this.partitionKeyRangeCache, this.targetContainerRid, this.groupThroughput.get()); } else if (this.connectionMode == ConnectionMode.GATEWAY) { requestController = new GlobalThroughputRequestController(this.globalEndpointManager, this.groupThroughput.get()); } else { throw new IllegalArgumentException(String.format("Connection mode %s is not supported", this.connectionMode)); } return requestController.init(); } public boolean isDefault() { return this.group.isDefault(); } public void onContainerMaxThroughputRefresh(int maxContainerThroughput) { if (this.maxContainerThroughput.getAndSet(maxContainerThroughput) != maxContainerThroughput) { this.calculateGroupThroughput(); } } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return this.resolveRequestController() .flatMap(requestController -> requestController.close()); } @Override public <T> Mono<T> 
processRequest(RxDocumentServiceRequest request, Mono<T> nextRequestMono) { return this.resolveRequestController() .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return Mono.just(requestController); } else { return this.shouldUpdateRequestController(request) .flatMap(shouldUpdate -> { if (shouldUpdate) { requestController.close().subscribeOn(Schedulers.parallel()).subscribe(); this.refreshRequestController(); return this.resolveRequestController(); } else { return Mono.just(requestController); } }); } }) .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return requestController.processRequest(request, nextRequestMono) .doOnError(throwable -> this.handleException(throwable)); } else { logger.warn( "Can not find request controller to handle request {} with pkRangeId {}", request.getActivityId(), request.requestContext.resolvedPartitionKeyRange.getId()); return nextRequestMono; } }); } private Mono<Boolean> shouldUpdateRequestController(RxDocumentServiceRequest request) { return this.partitionKeyRangeCache.tryGetRangeByPartitionKeyRangeId( null, request.requestContext.resolvedCollectionRid, request.requestContext.resolvedPartitionKeyRange.getId(), null) .map(pkRangeHolder -> pkRangeHolder.v) .flatMap(pkRange -> { if (pkRange == null) { return Mono.just(Boolean.FALSE); } else { return Mono.just(Boolean.TRUE); }}); } private Mono<IThroughputRequestController> resolveRequestController() { return this.requestControllerAsyncCache.getAsync( this.group.getGroupName(), null, () -> this.createAndInitializeRequestController()); } private void refreshRequestController() { this.requestControllerAsyncCache.refresh( this.group.getGroupName(), () -> this.createAndInitializeRequestController()); } private void handleException(Throwable throwable) { checkNotNull(throwable, "Throwable can not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if 
(isPartitionSplit(cosmosException) || isPartitionCompletingSplittingException(cosmosException)) { this.refreshRequestController(); } } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { return this.isDefault() || StringUtils.equals(this.group.getGroupName(), request.getThroughputControlGroupName()); } }
Got it — will change this everywhere.
/**
 * Background task that periodically renews the throughput usage cycle of the
 * resolved request controller, repeating until the token signals cancellation.
 *
 * @param cancellationToken token used to stop the renew loop; must not be null.
 * @return a {@link Flux} emitting one completion per renew cycle.
 */
private Flux<Void> throughputUsageCycleRenewTask(CancellationToken cancellationToken) {
    checkNotNull(cancellationToken, "Cancellation token can not be null");

    return Mono.delay(DEFAULT_THROUGHPUT_USAGE_RESET_DURATION)
        .flatMap(t -> this.resolveRequestController())
        .doOnSuccess(requestController ->
            requestController.renewThroughputUsageCycle(this.groupThroughput.get()))
        .onErrorResume(throwable -> {
            // FIX: SLF4J uses "{}" placeholders, not "%s"; a throwable passed as the
            // last argument is logged with its stack trace and needs no placeholder.
            logger.warn("Reset throughput usage failed", throwable);
            // Swallow the error so one failed cycle does not stop the renew loop.
            return Mono.empty();
        })
        .then()
        .repeat(() -> !cancellationToken.isCancellationRequested());
}
logger.warn("Reset throughput usage failed with reason %s", throwable);
/**
 * Repeatedly renews the request controller's throughput usage cycle, pausing
 * DEFAULT_THROUGHPUT_USAGE_RESET_DURATION before each cycle, until the
 * supplied token requests cancellation.
 */
private Flux<Void> throughputUsageCycleRenewTask(CancellationToken cancellationToken) {
    checkNotNull(cancellationToken, "Cancellation token can not be null");

    Mono<Void> singleRenewCycle = Mono.delay(DEFAULT_THROUGHPUT_USAGE_RESET_DURATION)
        .flatMap(tick -> this.resolveRequestController())
        .doOnSuccess(controller -> controller.renewThroughputUsageCycle(this.groupThroughput.get()))
        .onErrorResume(error -> {
            // One failed cycle must not terminate the loop; the throwable is
            // logged (with stack trace) and the cycle is skipped.
            logger.warn("Reset throughput usage failed with reason", error);
            return Mono.empty();
        })
        .then();

    return singleRenewCycle.repeat(() -> !cancellationToken.isCancellationRequested());
}
class ThroughputGroupControllerBase implements IThroughputController { private final static Logger logger = LoggerFactory.getLogger(ThroughputGroupControllerBase.class); private final Duration DEFAULT_THROUGHPUT_USAGE_RESET_DURATION = Duration.ofSeconds(1); private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ThroughputControlGroup group; private final AtomicReference<Double> groupThroughput; private final AtomicInteger maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final AsyncCache<String, IThroughputRequestController> requestControllerAsyncCache; private final String targetContainerRid; private final CancellationTokenSource cancellationTokenSource; public ThroughputGroupControllerBase( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, ThroughputControlGroup group, Integer maxContainerThroughput, RxPartitionKeyRangeCache partitionKeyRangeCache, String targetContainerRid) { checkNotNull(globalEndpointManager, "Global endpoint manager can not be null"); checkNotNull(group, "Throughput control group can not be null"); checkNotNull(partitionKeyRangeCache, "Partition key range cache can not be null or empty"); checkArgument(StringUtils.isNotEmpty(targetContainerRid), "Target container rid cannot be null nor empty"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.group = group; if (this.group.getTargetThroughputThreshold() != null) { checkNotNull(maxContainerThroughput, "Max container throughput can not be null when target throughput threshold defined"); this.maxContainerThroughput = new AtomicInteger(maxContainerThroughput); } else { this.maxContainerThroughput = null; } this.groupThroughput = new AtomicReference<>(); this.calculateGroupThroughput(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.requestControllerAsyncCache = new AsyncCache<>(); this.targetContainerRid = 
targetContainerRid; this.cancellationTokenSource = new CancellationTokenSource(); } private void calculateGroupThroughput() { double allocatedThroughput = Double.MAX_VALUE; if (this.group.getTargetThroughputThreshold() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.maxContainerThroughput.get() * this.group.getTargetThroughputThreshold()); } if (this.group.getTargetThroughput() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.group.getTargetThroughput()); } this.groupThroughput.set(allocatedThroughput); } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveRequestController() .doOnSuccess(dummy -> { this.throughputUsageCycleRenewTask(this.cancellationTokenSource.getToken()).subscribeOn(Schedulers.parallel()).subscribe(); }) .thenReturn((T)this); } private Mono<IThroughputRequestController> createAndInitializeRequestController() { IThroughputRequestController requestController; if (this.connectionMode == ConnectionMode.DIRECT) { requestController = new PkRangesThroughputRequestController( this.globalEndpointManager, this.partitionKeyRangeCache, this.targetContainerRid, this.groupThroughput.get()); } else if (this.connectionMode == ConnectionMode.GATEWAY) { requestController = new GlobalThroughputRequestController(this.globalEndpointManager, this.groupThroughput.get()); } else { throw new IllegalArgumentException(String.format("Connection mode %s is not supported")); } return requestController.init(); } public boolean isDefault() { return this.group.isDefault(); } public void onContainerMaxThroughputRefresh(int maxContainerThroughput) { if (this.maxContainerThroughput.getAndSet(maxContainerThroughput) != maxContainerThroughput) { this.calculateGroupThroughput(); } } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return this.resolveRequestController() .flatMap(requestController -> requestController.close()); } @Override public <T> Mono<T> 
processRequest(RxDocumentServiceRequest request, Mono<T> nextRequestMono) { return this.resolveRequestController() .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return Mono.just(requestController); } else { return this.shouldUpdateRequestController(request) .flatMap(shouldUpdate -> { if (shouldUpdate) { requestController.close().subscribeOn(Schedulers.parallel()).subscribe(); this.refreshRequestController(); return this.resolveRequestController(); } else { return Mono.just(requestController); } }); } }) .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return requestController.processRequest(request, nextRequestMono) .doOnError(throwable -> this.handleException(throwable)); } else { logger.warn( "Can not find request controller to handle request {} with pkRangeId {}", request.getActivityId(), request.requestContext.resolvedPartitionKeyRange.getId()); return nextRequestMono; } }); } private Mono<Boolean> shouldUpdateRequestController(RxDocumentServiceRequest request) { return this.partitionKeyRangeCache.tryGetRangeByPartitionKeyRangeId( null, request.requestContext.resolvedCollectionRid, request.requestContext.resolvedPartitionKeyRange.getId(), null) .map(pkRangeHolder -> pkRangeHolder.v) .flatMap(pkRange -> { if (pkRange == null) { return Mono.just(Boolean.FALSE); } else { return Mono.just(Boolean.TRUE); }}); } private Mono<IThroughputRequestController> resolveRequestController() { return this.requestControllerAsyncCache.getAsync( this.group.getGroupName(), null, () -> this.createAndInitializeRequestController()); } private void refreshRequestController() { this.requestControllerAsyncCache.refresh( this.group.getGroupName(), () -> this.createAndInitializeRequestController()); } private void handleException(Throwable throwable) { checkNotNull(throwable, "Throwable can not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if 
(isPartitionSplit(cosmosException) || isPartitionCompletingSplittingException(cosmosException)) { this.refreshRequestController(); } } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { return this.isDefault() || StringUtils.equals(this.group.getGroupName(), request.getThroughputControlGroupName()); } }
class ThroughputGroupControllerBase implements IThroughputController { private final static Logger logger = LoggerFactory.getLogger(ThroughputGroupControllerBase.class); private final Duration DEFAULT_THROUGHPUT_USAGE_RESET_DURATION = Duration.ofSeconds(1); private final ConnectionMode connectionMode; private final GlobalEndpointManager globalEndpointManager; private final ThroughputControlGroup group; private final AtomicReference<Double> groupThroughput; private final AtomicInteger maxContainerThroughput; private final RxPartitionKeyRangeCache partitionKeyRangeCache; private final AsyncCache<String, IThroughputRequestController> requestControllerAsyncCache; private final String targetContainerRid; private final CancellationTokenSource cancellationTokenSource; public ThroughputGroupControllerBase( ConnectionMode connectionMode, GlobalEndpointManager globalEndpointManager, ThroughputControlGroup group, Integer maxContainerThroughput, RxPartitionKeyRangeCache partitionKeyRangeCache, String targetContainerRid) { checkNotNull(globalEndpointManager, "Global endpoint manager can not be null"); checkNotNull(group, "Throughput control group can not be null"); checkNotNull(partitionKeyRangeCache, "Partition key range cache can not be null or empty"); checkArgument(StringUtils.isNotEmpty(targetContainerRid), "Target container rid cannot be null nor empty"); this.connectionMode = connectionMode; this.globalEndpointManager = globalEndpointManager; this.group = group; if (this.group.getTargetThroughputThreshold() != null) { checkNotNull(maxContainerThroughput, "Max container throughput can not be null when target throughput threshold defined"); this.maxContainerThroughput = new AtomicInteger(maxContainerThroughput); } else { this.maxContainerThroughput = null; } this.groupThroughput = new AtomicReference<>(); this.calculateGroupThroughput(); this.partitionKeyRangeCache = partitionKeyRangeCache; this.requestControllerAsyncCache = new AsyncCache<>(); this.targetContainerRid = 
targetContainerRid; this.cancellationTokenSource = new CancellationTokenSource(); } private void calculateGroupThroughput() { double allocatedThroughput = Double.MAX_VALUE; if (this.group.getTargetThroughputThreshold() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.maxContainerThroughput.get() * this.group.getTargetThroughputThreshold()); } if (this.group.getTargetThroughput() != null) { allocatedThroughput = Math.min(allocatedThroughput, this.group.getTargetThroughput()); } this.groupThroughput.set(allocatedThroughput); } @Override @SuppressWarnings("unchecked") public <T> Mono<T> init() { return this.resolveRequestController() .doOnSuccess(dummy -> { this.throughputUsageCycleRenewTask(this.cancellationTokenSource.getToken()).subscribeOn(Schedulers.parallel()).subscribe(); }) .thenReturn((T)this); } private Mono<IThroughputRequestController> createAndInitializeRequestController() { IThroughputRequestController requestController; if (this.connectionMode == ConnectionMode.DIRECT) { requestController = new PkRangesThroughputRequestController( this.globalEndpointManager, this.partitionKeyRangeCache, this.targetContainerRid, this.groupThroughput.get()); } else if (this.connectionMode == ConnectionMode.GATEWAY) { requestController = new GlobalThroughputRequestController(this.globalEndpointManager, this.groupThroughput.get()); } else { throw new IllegalArgumentException(String.format("Connection mode %s is not supported", this.connectionMode)); } return requestController.init(); } public boolean isDefault() { return this.group.isDefault(); } public void onContainerMaxThroughputRefresh(int maxContainerThroughput) { if (this.maxContainerThroughput.getAndSet(maxContainerThroughput) != maxContainerThroughput) { this.calculateGroupThroughput(); } } @Override public Mono<Void> close() { this.cancellationTokenSource.cancel(); return this.resolveRequestController() .flatMap(requestController -> requestController.close()); } @Override public <T> Mono<T> 
processRequest(RxDocumentServiceRequest request, Mono<T> nextRequestMono) { return this.resolveRequestController() .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return Mono.just(requestController); } else { return this.shouldUpdateRequestController(request) .flatMap(shouldUpdate -> { if (shouldUpdate) { requestController.close().subscribeOn(Schedulers.parallel()).subscribe(); this.refreshRequestController(); return this.resolveRequestController(); } else { return Mono.just(requestController); } }); } }) .flatMap(requestController -> { if (requestController.canHandleRequest(request)) { return requestController.processRequest(request, nextRequestMono) .doOnError(throwable -> this.handleException(throwable)); } else { logger.warn( "Can not find request controller to handle request {} with pkRangeId {}", request.getActivityId(), request.requestContext.resolvedPartitionKeyRange.getId()); return nextRequestMono; } }); } private Mono<Boolean> shouldUpdateRequestController(RxDocumentServiceRequest request) { return this.partitionKeyRangeCache.tryGetRangeByPartitionKeyRangeId( null, request.requestContext.resolvedCollectionRid, request.requestContext.resolvedPartitionKeyRange.getId(), null) .map(pkRangeHolder -> pkRangeHolder.v) .flatMap(pkRange -> { if (pkRange == null) { return Mono.just(Boolean.FALSE); } else { return Mono.just(Boolean.TRUE); }}); } private Mono<IThroughputRequestController> resolveRequestController() { return this.requestControllerAsyncCache.getAsync( this.group.getGroupName(), null, () -> this.createAndInitializeRequestController()); } private void refreshRequestController() { this.requestControllerAsyncCache.refresh( this.group.getGroupName(), () -> this.createAndInitializeRequestController()); } private void handleException(Throwable throwable) { checkNotNull(throwable, "Throwable can not be null"); CosmosException cosmosException = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if 
(isPartitionSplit(cosmosException) || isPartitionCompletingSplittingException(cosmosException)) { this.refreshRequestController(); } } @Override public boolean canHandleRequest(RxDocumentServiceRequest request) { return this.isDefault() || StringUtils.equals(this.group.getGroupName(), request.getThroughputControlGroupName()); } }
Is this a regression? Why wasn't it caught before? Where does this error message come from? Isn't asserting on the exact text fragile?
// Verifies create/read/replace behavior of a container's conflict resolution
// policy: the default mode, LWW with default and explicit paths, service-side
// rejection of invalid LWW paths, and custom mode with stored procedure paths.
public void conflictResolutionPolicyCRUD() {
    // Create a container and read its effective properties back.
    CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
    database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block();
    CosmosAsyncContainer container = database.getContainer(containerSettings.getId());
    containerSettings = container.read().block().getProperties();
    // Default conflict resolution mode is last-writer-wins.
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);

    // Replacing with an explicit LWW policy defaults the resolution path to "/_ts".
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy());
    containerSettings = container.replace(containerSettings, null).block().getProperties();
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);
    assertThat(containerSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo("/_ts");

    // null/empty LWW paths fall back to "/_ts"; an explicit top-level path is kept.
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS, new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" });

    // A nested path is rejected by the service with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid getPath.");
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            // NOTE(review): expects the service's escaped-path error text verbatim;
            // fragile if the backend message format changes — confirm against the
            // service source before relying on it.
            assertThat(dce.getMessage()).contains("Invalid path '\\\\/a\\\\/b' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // A non-path string is likewise rejected with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid path.");
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // Custom mode: null/empty stored procedure paths normalize to "".
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM, new String[] { "dbs/mydb/colls" + "/mycoll/sprocs/randomSprocName", null, "" }, new String[] { "dbs/mydb/colls/mycoll/sprocs" + "/randomSprocName", "", "" });
}
assertThat(dce.getMessage()).contains("Invalid path '\\\\/a\\\\/b' for last writer wins conflict resolution");
// Verifies create/read/replace behavior of a container's conflict resolution
// policy: the default mode, LWW with default and explicit paths, service-side
// rejection of invalid LWW paths, and custom mode with stored procedure paths.
public void conflictResolutionPolicyCRUD() {
    // Create a container and read its effective properties back.
    CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
    database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block();
    CosmosAsyncContainer container = database.getContainer(containerSettings.getId());
    containerSettings = container.read().block().getProperties();
    // Default conflict resolution mode is last-writer-wins.
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);

    // Replacing with an explicit LWW policy defaults the resolution path to "/_ts".
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy());
    containerSettings = container.replace(containerSettings, null).block().getProperties();
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);
    assertThat(containerSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo("/_ts");

    // null/empty LWW paths fall back to "/_ts"; an explicit top-level path is kept.
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS, new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" });

    // A nested path is rejected by the service with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid getPath.");
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            // NOTE(review): expects the service's escaped-path error text verbatim;
            // fragile if the backend message format changes — confirm against the
            // service source before relying on it.
            assertThat(dce.getMessage()).contains("Invalid path '\\\\/a\\\\/b' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // A non-path string is likewise rejected with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid path.");
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // Custom mode: null/empty stored procedure paths normalize to "".
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM, new String[] { "dbs/mydb/colls" + "/mycoll/sprocs/randomSprocName", null, "" }, new String[] { "dbs/mydb/colls/mycoll/sprocs" + "/randomSprocName", "", "" });
}
class MultiMasterConflictResolutionTest extends TestSuiteBase { private static final int TIMEOUT = 40000; private final String databaseId = CosmosDatabaseForTest.generateId(); private PartitionKeyDefinition partitionKeyDef; private CosmosAsyncClient client; private CosmosAsyncDatabase database; @Factory(dataProvider = "clientBuilders") public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = "multi-master", timeOut = 10 * TIMEOUT) private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode, String[] paths, String[] expectedPaths) { for (int i = 0; i < paths.length; i++) { CosmosContainerProperties collectionSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { collectionSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i])); } else { collectionSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy(paths[i])); } collectionSettings = database.createContainer(collectionSettings, new CosmosContainerRequestOptions()).block().getProperties(); assertThat(collectionSettings.getConflictResolutionPolicy().getMode()).isEqualTo(conflictResolutionMode); if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo(expectedPaths[i]); } else { assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionProcedure()).isEqualTo(expectedPaths[i]); } } } @Test(groups = "multi-master", timeOut = TIMEOUT) public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception { CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); ConflictResolutionPolicy policy = 
ModelBridgeUtils.createConflictResolutionPolicy(); ModelBridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS); ModelBridgeUtils.setStoredProc(policy,"randomSprocName"); collection.setConflictResolutionPolicy(policy); Mono<CosmosContainerResponse> createObservable = database.createContainer( collection, new CosmosContainerRequestOptions()); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosException.class) .statusCode(400) .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.") .build(); validateFailure(createObservable, validator); } @Test(groups = "multi-master", timeOut = TIMEOUT) public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception { CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy(); ModelBridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM); ModelBridgeUtils.setPath(policy,"/mypath"); collection.setConflictResolutionPolicy(policy); Mono<CosmosContainerResponse> createObservable = database.createContainer( collection, new CosmosContainerRequestOptions()); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosException.class) .statusCode(400) .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.") .build(); validateFailure(createObservable, validator); } @Test(groups = "multi-master", timeOut = 10 * TIMEOUT) public void updateConflictResolutionWithException() { CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block(); CosmosAsyncContainer container = database.getContainer(containerSettings.getId()); containerSettings = container.read().block().getProperties(); 
assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/userProvidedField")); try { container.replace(containerSettings, null).block().getProperties(); fail("Updating conflict resolution policy should"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(ex.getMessage()).contains("Updating conflict resolution policy is currently not supported"); } } @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT) public void before_MultiMasterConflictResolutionTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, databaseId); partitionKeyDef = new PartitionKeyDefinition(); ArrayList<String> paths = new ArrayList<String>(); paths.add("/mypk"); partitionKeyDef.setPaths(paths); } @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeDeleteDatabase(database); safeClose(client); } }
class MultiMasterConflictResolutionTest extends TestSuiteBase { private static final int TIMEOUT = 40000; private final String databaseId = CosmosDatabaseForTest.generateId(); private PartitionKeyDefinition partitionKeyDef; private CosmosAsyncClient client; private CosmosAsyncDatabase database; @Factory(dataProvider = "clientBuilders") public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = "multi-master", timeOut = 10 * TIMEOUT) private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode, String[] paths, String[] expectedPaths) { for (int i = 0; i < paths.length; i++) { CosmosContainerProperties collectionSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { collectionSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i])); } else { collectionSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy(paths[i])); } collectionSettings = database.createContainer(collectionSettings, new CosmosContainerRequestOptions()).block().getProperties(); assertThat(collectionSettings.getConflictResolutionPolicy().getMode()).isEqualTo(conflictResolutionMode); if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo(expectedPaths[i]); } else { assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionProcedure()).isEqualTo(expectedPaths[i]); } } } @Test(groups = "multi-master", timeOut = TIMEOUT) public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception { CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); ConflictResolutionPolicy policy = 
ModelBridgeUtils.createConflictResolutionPolicy(); ModelBridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS); ModelBridgeUtils.setStoredProc(policy,"randomSprocName"); collection.setConflictResolutionPolicy(policy); Mono<CosmosContainerResponse> createObservable = database.createContainer( collection, new CosmosContainerRequestOptions()); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosException.class) .statusCode(400) .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.") .build(); validateFailure(createObservable, validator); } @Test(groups = "multi-master", timeOut = TIMEOUT) public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception { CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy(); ModelBridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM); ModelBridgeUtils.setPath(policy,"/mypath"); collection.setConflictResolutionPolicy(policy); Mono<CosmosContainerResponse> createObservable = database.createContainer( collection, new CosmosContainerRequestOptions()); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosException.class) .statusCode(400) .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.") .build(); validateFailure(createObservable, validator); } @Test(groups = "multi-master", timeOut = 10 * TIMEOUT) public void updateConflictResolutionWithException() { CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block(); CosmosAsyncContainer container = database.getContainer(containerSettings.getId()); containerSettings = container.read().block().getProperties(); 
assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/userProvidedField")); try { container.replace(containerSettings, null).block().getProperties(); fail("Updating conflict resolution policy should"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); assertThat(ex.getMessage()).contains("Updating conflict resolution policy is currently not supported"); } } @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT) public void before_MultiMasterConflictResolutionTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, databaseId); partitionKeyDef = new PartitionKeyDefinition(); ArrayList<String> paths = new ArrayList<String>(); paths.add("/mypk"); partitionKeyDef.setPaths(paths); } @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeDeleteDatabase(database); safeClose(client); } }
Will we ever need to support several sign-up or sign-in flows, or is a single flow guaranteed by the configuration?
/**
 * Builds the B2C client registration repository.
 * <p>
 * The configuration supports only a single sign-up-or-sign-in flow today, so
 * that list is pre-sized to one entry. Profile-edit and password-reset flows
 * are collected separately because they are not used as login entry points.
 */
public ClientRegistrationRepository clientRegistrationRepository() {
    final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>(1);
    final List<ClientRegistration> otherRegistrations = new ArrayList<>();
    addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUpOrSignIn());
    addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getProfileEdit());
    addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getPasswordReset());
    return new AADB2CClientRegistrationRepository(signUpOrSignInRegistrations, otherRegistrations);
}
final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>();
/**
 * Creates the repository holding every configured B2C client registration.
 * Login (sign-up-or-sign-in) registrations are kept apart from auxiliary
 * flows (profile-edit, password-reset); the login list holds at most one
 * entry, hence the capacity hint of 1.
 */
public ClientRegistrationRepository clientRegistrationRepository() {
    final List<ClientRegistration> loginFlows = new ArrayList<>(1);
    final List<ClientRegistration> auxiliaryFlows = new ArrayList<>();
    addB2CClientRegistration(loginFlows, properties.getUserFlows().getSignUpOrSignIn());
    addB2CClientRegistration(auxiliaryFlows, properties.getUserFlows().getProfileEdit());
    addB2CClientRegistration(auxiliaryFlows, properties.getUserFlows().getPasswordReset());
    return new AADB2CClientRegistrationRepository(loginFlows, auxiliaryFlows);
}
class AADB2COidcAutoConfiguration { private final AADB2CProperties properties; public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) { this.properties = properties; } private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) { if (StringUtils.hasText(userFlow)) { registrations.add(b2cClientRegistration(userFlow)); } } @Bean @ConditionalOnMissingBean private ClientRegistration b2cClientRegistration(String userFlow) { Assert.hasText(userFlow, "User flow should contains text."); return ClientRegistration.withRegistrationId(userFlow) .clientId(properties.getClientId()) .clientSecret(properties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate(properties.getReplyUrl()) .scope(properties.getClientId(), "openid") .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getTenant())) .tokenUri(AADB2CURL.getTokenUrl(properties.getTenant(), userFlow)) .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getTenant(), userFlow)) .userNameAttributeName("name") .clientName(userFlow) .build(); } }
class AADB2COidcAutoConfiguration { private final AADB2CProperties properties; public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) { this.properties = properties; } private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) { if (StringUtils.hasText(userFlow)) { registrations.add(b2cClientRegistration(userFlow)); } } @Bean @ConditionalOnMissingBean private ClientRegistration b2cClientRegistration(String userFlow) { Assert.hasText(userFlow, "User flow should contains text."); return ClientRegistration.withRegistrationId(userFlow) .clientId(properties.getClientId()) .clientSecret(properties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate(properties.getReplyUrl()) .scope(properties.getClientId(), "openid") .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getTenant())) .tokenUri(AADB2CURL.getTokenUrl(properties.getTenant(), userFlow)) .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getTenant(), userFlow)) .userNameAttributeName("name") .clientName(userFlow) .build(); } }
I see what you're trying to avoid here — exposing the auxiliary (profile-edit / password-reset) registrations through the iterator, so only the sign-up-or-sign-in registrations are iterable.
/**
 * Iterates only over the sign-up-or-sign-in registrations; auxiliary flows
 * (profile-edit, password-reset) are deliberately excluded from iteration.
 */
public Iterator<ClientRegistration> iterator() {
    final List<ClientRegistration> loginRegistrations = this.signUpOrSignInRegistrations;
    return loginRegistrations.iterator();
}
return this.signUpOrSignInRegistrations.iterator();
/** Returns an iterator over the login (sign-up-or-sign-in) registrations only. */
public Iterator<ClientRegistration> iterator() {
    return signUpOrSignInRegistrations.iterator();
}
class AADB2CClientRegistrationRepository implements ClientRegistrationRepository, Iterable<ClientRegistration> { private final InMemoryClientRegistrationRepository clientRegistrations; private final List<ClientRegistration> signUpOrSignInRegistrations; AADB2CClientRegistrationRepository(List<ClientRegistration> signUpOrSignInRegistrations, List<ClientRegistration> otherRegistrations) { this.signUpOrSignInRegistrations = signUpOrSignInRegistrations; List<ClientRegistration> allRegistrations = Stream.of(signUpOrSignInRegistrations, otherRegistrations) .flatMap(Collection::stream) .collect(Collectors.toList()); this.clientRegistrations = new InMemoryClientRegistrationRepository(allRegistrations); } @Override public ClientRegistration findByRegistrationId(String registrationId) { return this.clientRegistrations.findByRegistrationId(registrationId); } @NotNull @Override }
class AADB2CClientRegistrationRepository implements ClientRegistrationRepository, Iterable<ClientRegistration> { private final InMemoryClientRegistrationRepository clientRegistrations; private final List<ClientRegistration> signUpOrSignInRegistrations; AADB2CClientRegistrationRepository(List<ClientRegistration> signUpOrSignInRegistrations, List<ClientRegistration> otherRegistrations) { this.signUpOrSignInRegistrations = signUpOrSignInRegistrations; List<ClientRegistration> allRegistrations = Stream.of(signUpOrSignInRegistrations, otherRegistrations) .flatMap(Collection::stream) .collect(Collectors.toList()); this.clientRegistrations = new InMemoryClientRegistrationRepository(allRegistrations); } @Override public ClientRegistration findByRegistrationId(String registrationId) { return this.clientRegistrations.findByRegistrationId(registrationId); } @NotNull @Override }
For now, our configuration file can only configure a single sign-up-or-sign-in flow, so the list will never hold more than one entry.
/**
 * Builds the B2C client registration repository. Since the configuration can
 * define only one sign-up-or-sign-in flow, that list is pre-sized to a single
 * entry; the remaining flows are tracked separately as they are not login
 * entry points.
 */
public ClientRegistrationRepository clientRegistrationRepository() {
    final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>(1);
    final List<ClientRegistration> otherRegistrations = new ArrayList<>();
    addB2CClientRegistration(signUpOrSignInRegistrations, properties.getUserFlows().getSignUpOrSignIn());
    addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getProfileEdit());
    addB2CClientRegistration(otherRegistrations, properties.getUserFlows().getPasswordReset());
    return new AADB2CClientRegistrationRepository(signUpOrSignInRegistrations, otherRegistrations);
}
final List<ClientRegistration> signUpOrSignInRegistrations = new ArrayList<>();
/**
 * Assembles all configured B2C registrations into a repository, separating
 * the single login flow from the auxiliary (non-login) flows.
 */
public ClientRegistrationRepository clientRegistrationRepository() {
    final List<ClientRegistration> signInFlows = new ArrayList<>(1);
    final List<ClientRegistration> nonLoginFlows = new ArrayList<>();
    addB2CClientRegistration(signInFlows, properties.getUserFlows().getSignUpOrSignIn());
    addB2CClientRegistration(nonLoginFlows, properties.getUserFlows().getProfileEdit());
    addB2CClientRegistration(nonLoginFlows, properties.getUserFlows().getPasswordReset());
    return new AADB2CClientRegistrationRepository(signInFlows, nonLoginFlows);
}
class AADB2COidcAutoConfiguration { private final AADB2CProperties properties; public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) { this.properties = properties; } private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) { if (StringUtils.hasText(userFlow)) { registrations.add(b2cClientRegistration(userFlow)); } } @Bean @ConditionalOnMissingBean private ClientRegistration b2cClientRegistration(String userFlow) { Assert.hasText(userFlow, "User flow should contains text."); return ClientRegistration.withRegistrationId(userFlow) .clientId(properties.getClientId()) .clientSecret(properties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate(properties.getReplyUrl()) .scope(properties.getClientId(), "openid") .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getTenant())) .tokenUri(AADB2CURL.getTokenUrl(properties.getTenant(), userFlow)) .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getTenant(), userFlow)) .userNameAttributeName("name") .clientName(userFlow) .build(); } }
class AADB2COidcAutoConfiguration { private final AADB2CProperties properties; public AADB2COidcAutoConfiguration(@NonNull AADB2CProperties properties) { this.properties = properties; } private void addB2CClientRegistration(@NonNull List<ClientRegistration> registrations, String userFlow) { if (StringUtils.hasText(userFlow)) { registrations.add(b2cClientRegistration(userFlow)); } } @Bean @ConditionalOnMissingBean private ClientRegistration b2cClientRegistration(String userFlow) { Assert.hasText(userFlow, "User flow should contains text."); return ClientRegistration.withRegistrationId(userFlow) .clientId(properties.getClientId()) .clientSecret(properties.getClientSecret()) .clientAuthenticationMethod(ClientAuthenticationMethod.POST) .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate(properties.getReplyUrl()) .scope(properties.getClientId(), "openid") .authorizationUri(AADB2CURL.getAuthorizationUrl(properties.getTenant())) .tokenUri(AADB2CURL.getTokenUrl(properties.getTenant(), userFlow)) .jwkSetUri(AADB2CURL.getJwkSetUrl(properties.getTenant(), userFlow)) .userNameAttributeName("name") .clientName(userFlow) .build(); } }
Maybe we could handle the exception inside AADB2CSeleniumITHelper.quitDriver() itself, so each test class doesn't need its own try/catch around cleanup.
/**
 * Tears down the shared Selenium helper after each test.
 * <p>
 * The previous version swallowed every exception silently and only nulled the
 * field inside the catch block; now we guard against the helper never having
 * been created (a test failing before assignment), always clear the field,
 * and let genuine cleanup failures surface to the test report.
 */
public void quitDriver() {
    if (aadB2CSeleniumITHelper != null) {
        try {
            aadB2CSeleniumITHelper.quitDriver();
        } finally {
            // Always drop the reference so the next test starts fresh.
            aadB2CSeleniumITHelper = null;
        }
    }
}
aadB2CSeleniumITHelper.quitDriver();
// Tear down the Selenium driver after each test; failures propagate
// (presumably any expected cleanup exceptions are handled inside
// AADB2CSeleniumITHelper.quitDriver() — confirm with the helper).
public void quitDriver() { aadB2CSeleniumITHelper.quitDriver(); }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test public void testSignIn() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? 
JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test public void testLogOut() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String signInButtonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test public void testSignIn() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? 
JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test public void testLogOut() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String signInButtonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
Maybe this constructor call could fit on one line now that the argument list is shorter?
/**
 * Signs in through the sign-up-or-sign-in user flow and verifies that the
 * authenticated principal exposes a name and the expected user-flow attribute.
 */
public void testSignIn() throws InterruptedException {
    aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap());
    aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN);
    final String displayedName = aadB2CSeleniumITHelper.getName();
    final String flowName = aadB2CSeleniumITHelper.getUserFlowName();
    Assert.assertNotNull(displayedName);
    Assert.assertNotNull(flowName);
    Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, flowName);
}
Collections.emptyMap());
/** Verifies a successful sign-in via the sign-up-or-sign-in flow. */
public void testSignIn() throws InterruptedException {
    aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap());
    aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN);
    // Both the principal name and the user-flow attribute must be populated.
    Assert.assertNotNull(aadB2CSeleniumITHelper.getName());
    final String userFlowName = aadB2CSeleniumITHelper.getUserFlowName();
    Assert.assertNotNull(userFlowName);
    Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName);
}
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test @Test public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test public void testLogOut() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String signInButtonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After public void quitDriver() { try { aadB2CSeleniumITHelper.quitDriver(); }catch (Exception e){ aadB2CSeleniumITHelper = null; } } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") 
public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test @Test public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test public void testLogOut() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String signInButtonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After public void quitDriver() { aadB2CSeleniumITHelper.quitDriver(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken 
token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
Same here — this constructor call could also fit on one line now.
/**
 * Signs in and then logs out, verifying the page returns to its initial
 * "Sign in" state.
 */
public void testLogOut() throws InterruptedException {
    aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap());
    aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN);
    final String buttonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText();
    Assert.assertEquals("Sign in", buttonText);
}
Collections.emptyMap());
/** After logging out, the sign-in button must be shown again. */
public void testLogOut() throws InterruptedException {
    aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap());
    aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN);
    Assert.assertEquals("Sign in", aadB2CSeleniumITHelper.logoutAndGetSignInButtonText());
}
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test public void testSignIn() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? 
JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test @After public void quitDriver() { aadB2CSeleniumITHelper.quitDriver(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test public void testSignIn() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? 
JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test @After public void quitDriver() { aadB2CSeleniumITHelper.quitDriver(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
same here
public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); }
aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class,
public void testProfileEdit() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test public void testSignIn() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test @Test public void testLogOut() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String signInButtonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After public void quitDriver() { aadB2CSeleniumITHelper.quitDriver(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Test public void testSignIn() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test @Test public void testLogOut() throws InterruptedException { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadB2CSeleniumITHelper.signIn(AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); String signInButtonText = aadB2CSeleniumITHelper.logoutAndGetSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After public void quitDriver() { aadB2CSeleniumITHelper.quitDriver(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
Do we actually guarantee the order of the partition key paths as they were created? and even if the response might return the values in lets say alphabetical order, we cannot assume that the de-serialization of the response will keep that order.
public void crudMultiHashContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH); partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); ArrayList<String> paths = new ArrayList<>(); paths.add("/city"); paths.add("/zipcode"); partitionKeyDefinition.setPaths(paths); CosmosContainerProperties containerProperties = getContainerDefinition(collectionName, partitionKeyDefinition); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind() == PartitionKind.MULTI_HASH); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().size() == paths.size()); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(0) == paths.get(0)); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(1) == paths.get(1)); CosmosContainer multiHashContainer = createdDatabase.getContainer(collectionName); containerResponse = multiHashContainer.read(); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind() == PartitionKind.MULTI_HASH); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().size() == paths.size()); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(0) == paths.get(0)); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(1) == paths.get(1)); CosmosContainerResponse deleteResponse = multiHashContainer.delete(); }
assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(0) == paths.get(0));
public void crudMultiHashContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH); partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); ArrayList<String> paths = new ArrayList<>(); paths.add("/city"); paths.add("/zipcode"); partitionKeyDefinition.setPaths(paths); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName, partitionKeyDefinition); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind() == PartitionKind.MULTI_HASH); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().size() == paths.size()); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(0) == paths.get(0)); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(1) == paths.get(1)); CosmosContainer multiHashContainer = createdDatabase.getContainer(collectionName); containerResponse = multiHashContainer.read(); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind() == PartitionKind.MULTI_HASH); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().size() == paths.size()); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(0) == paths.get(0)); assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(1) == paths.get(1)); CosmosContainerResponse deleteResponse = multiHashContainer.delete(); }
class CosmosContainerTest extends TestSuiteBase { private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); private CosmosClient client; private CosmosDatabase createdDatabase; @Factory(dataProvider = "clientBuilders") public CosmosContainerTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildClient(); createdDatabase = createSyncDatabase(client, preExistingDatabaseId); } @AfterClass(groups = {"emulator"}, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteSyncDatabase(createdDatabase); safeCloseSyncClient(client); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withProperties() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponse(containerProperties, containerResponse); } @DataProvider public static Object[][] analyticalTTLProvider() { return new Object[][]{ {-1}, {0}, {10}, {null} }; } @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "analyticalTTLProvider", enabled = false) public void createContainer_withAnalyticalTTL(Integer analyticalTTL) throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = new CosmosContainerProperties(collectionName, "/id"); containerProperties.setAnalyticalStoreTimeToLiveInSeconds(analyticalTTL); if (analyticalTTL != null && analyticalTTL > 0) { containerProperties.setDefaultTimeToLiveInSeconds(analyticalTTL - 1); } CosmosContainerResponse containerResponse = 
createdDatabase.createContainer(containerProperties); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getAnalyticalStoreTimeToLiveInSeconds()).isEqualTo(analyticalTTL); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void createContainer_alreadyExists() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); try { createdDatabase.createContainer(containerProperties); } catch (Exception e) { assertThat(e).isInstanceOf(CosmosException.class); assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withThroughput() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, options); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, 
timeOut = TIMEOUT) public void createContainer_withThroughputAndOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, throughput, options); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withNameAndPartitoinKeyPath() throws Exception { String collectionName = UUID.randomUUID().toString(); String partitionKeyPath = "/mypk"; CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath); validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withNamePartitionPathAndThroughput() throws Exception { String collectionName = UUID.randomUUID().toString(); String partitionKeyPath = "/mypk"; int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath, ThroughputProperties.createManualThroughput(throughput)); validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); 
CosmosContainerResponse read = syncContainer.read(); validateContainerResponse(containerProperties, read); CosmosContainerResponse read1 = syncContainer.read(options); validateContainerResponse(containerProperties, read1); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(1); assertThat(feedRanges.get(0).toJsonString()) .isNotNull() .isEqualTo("{\"PKRangeId\":\"0\"}"); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void deleteContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse deleteResponse = syncContainer.delete(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void deleteContainer_withOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); 
CosmosContainerResponse deleteResponse = syncContainer.delete(options); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void replace() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getIndexingPolicy().getIndexingMode()).isEqualTo(IndexingMode.CONSISTENT); CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse.getProperties().setIndexingPolicy( new IndexingPolicy().setIndexingMode(IndexingMode.CONSISTENT))); assertThat(replaceResponse.getProperties().getIndexingPolicy().getIndexingMode()) .isEqualTo(IndexingMode.CONSISTENT); CosmosContainerResponse replaceResponse1 = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse.getProperties().setIndexingPolicy( new IndexingPolicy().setIndexingMode(IndexingMode.CONSISTENT)), options); assertThat(replaceResponse1.getProperties().getIndexingPolicy().getIndexingMode()) .isEqualTo(IndexingMode.CONSISTENT); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readAllContainers() throws Exception{ String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator = createdDatabase.readAllContainers(); assertThat(feedResponseIterator.iterator().hasNext()).isTrue(); CosmosQueryRequestOptions 
cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 = createdDatabase.readAllContainers(cosmosQueryRequestOptions); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void queryContainer() throws Exception{ String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); String query = String.format("SELECT * from c where c.id = '%s'", collectionName); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator = createdDatabase.queryContainers(query); assertThat(feedResponseIterator.iterator().hasNext()).isTrue(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 = createdDatabase.queryContainers(query, cosmosQueryRequestOptions); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator2 = createdDatabase.queryContainers(querySpec); assertThat(feedResponseIterator2.iterator().hasNext()).isTrue(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator3 = createdDatabase.queryContainers(querySpec, cosmosQueryRequestOptions); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } private void validateContainerResponse(CosmosContainerProperties containerProperties, CosmosContainerResponse createResponse) { assertThat(createResponse.getProperties().getId()).isNotNull(); assertThat(createResponse.getProperties().getId()) .as("check Resource Id") 
.isEqualTo(containerProperties.getId()); } }
class CosmosContainerTest extends TestSuiteBase { private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); private CosmosClient client; private CosmosDatabase createdDatabase; private CosmosContainer createdContainer; @Factory(dataProvider = "clientBuilders") public CosmosContainerTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildClient(); createdDatabase = createSyncDatabase(client, preExistingDatabaseId); createEncryptionKey(); } @AfterClass(groups = {"emulator"}, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteSyncDatabase(createdDatabase); safeCloseSyncClient(client); } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.createdContainer = null; } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.createdContainer != null) { try { this.createdContainer.delete(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withProperties() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withEncryption() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); 
ClientEncryptionIncludedPath path1 = new ClientEncryptionIncludedPath(); path1.setPath("/path1"); path1.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path1.setEncryptionType("Randomized"); path1.setClientEncryptionKeyId("containerTestKey1"); ClientEncryptionIncludedPath path2 = new ClientEncryptionIncludedPath(); path2.setPath("/path2"); path2.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path2.setEncryptionType("Deterministic"); path2.setClientEncryptionKeyId("containerTestKey2"); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(path1); paths.add(path2); ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(paths); containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponseWithEncryption(containerProperties, containerResponse, clientEncryptionPolicy); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void createContainer_withPartitionKeyInEncryption() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); ClientEncryptionIncludedPath path1 = new ClientEncryptionIncludedPath(); path1.setPath("/mypk"); path1.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path1.setEncryptionType("Randomized"); path1.setClientEncryptionKeyId("containerTestKey1"); ClientEncryptionIncludedPath path2 = new ClientEncryptionIncludedPath(); path2.setPath("/path2"); path2.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path2.setEncryptionType("Deterministic"); path2.setClientEncryptionKeyId("containerTestKey2"); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(path1); paths.add(path2); ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(paths); CosmosContainerResponse 
containerResponse = null; try { containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); containerResponse = createdDatabase.createContainer(containerProperties); fail("createContainer should fail as mypk which is part of the partition key cannot be included in the " + "ClientEncryptionPolicy."); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).isEqualTo("Path mypk which is part of the partition key cannot be included in" + " the ClientEncryptionPolicy."); } collectionName = UUID.randomUUID().toString(); containerProperties = new CosmosContainerProperties(collectionName, "/mypk/mypk1"); try { containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); containerResponse = createdDatabase.createContainer(containerProperties); fail("createContainer should fail as mypk which is part of the partition key cannot be included in the " + "ClientEncryptionPolicy."); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).isEqualTo("Path mypk which is part of the partition key cannot be included in" + " the ClientEncryptionPolicy."); } collectionName = UUID.randomUUID().toString(); containerProperties = new CosmosContainerProperties(collectionName, "/differentKey"); try { containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); List<String> keyPaths = new ArrayList<>(); keyPaths.add("/mypk"); partitionKeyDefinition.setPaths(keyPaths); containerProperties.setPartitionKeyDefinition(partitionKeyDefinition); containerResponse = createdDatabase.createContainer(containerProperties); fail("createContainer should fail as mypk which is part of the partition key cannot be included in the " + "ClientEncryptionPolicy."); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).isEqualTo("Path mypk which is part of the partition key cannot be included in" + " the ClientEncryptionPolicy."); } collectionName = 
UUID.randomUUID().toString(); containerProperties = new CosmosContainerProperties(collectionName, "/mypk1/mypk"); containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); containerResponse = createdDatabase.createContainer(containerProperties); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponseWithEncryption(containerProperties, containerResponse, clientEncryptionPolicy); } @DataProvider public static Object[][] analyticalTTLProvider() { return new Object[][]{ {-1}, {0}, {10}, {null} }; } @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "analyticalTTLProvider", enabled = false) public void createContainer_withAnalyticalTTL(Integer analyticalTTL) throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = new CosmosContainerProperties(collectionName, "/id"); containerProperties.setAnalyticalStoreTimeToLiveInSeconds(analyticalTTL); if (analyticalTTL != null && analyticalTTL > 0) { containerProperties.setDefaultTimeToLiveInSeconds(analyticalTTL - 1); } CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getAnalyticalStoreTimeToLiveInSeconds()).isEqualTo(analyticalTTL); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void createContainer_alreadyExists() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, 
containerResponse); try { createdDatabase.createContainer(containerProperties); } catch (Exception e) { assertThat(e).isInstanceOf(CosmosException.class); assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withThroughput() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withFullFidelityChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy( ChangeFeedPolicy.createFullFidelityPolicy( Duration.ofMinutes(8))); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ofMinutes(8)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withIncrementalChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); 
CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ZERO); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withDefaultChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ZERO); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = 
createdDatabase.createContainer(containerProperties, options); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withThroughputAndOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, throughput, options); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withNameAndPartitionKeyPath() throws Exception { String collectionName = UUID.randomUUID().toString(); String partitionKeyPath = "/mypk"; CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withNamePartitionPathAndThroughput() throws Exception { String collectionName = UUID.randomUUID().toString(); String partitionKeyPath = "/mypk"; int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void 
readContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse read = syncContainer.read(); validateContainerResponse(containerProperties, read); CosmosContainerResponse read1 = syncContainer.read(options); validateContainerResponse(containerProperties, read1); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(1); assertFeedRange(feedRanges.get(0), "{\"Range\":{\"min\":\"\",\"max\":\"FF\"}}"); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void trySplitRanges_for_NonExistingContainer() throws Exception { CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosAsyncContainer nonExistingContainer = createdDatabase.getContainer("NonExistingContainer").asyncContainer; CosmosException cosmosException = null; try { List<FeedRangeEpkImpl> splitFeedRanges = nonExistingContainer.trySplitFeedRange( FeedRange.forFullRange(), 3 ).block(); } catch (CosmosException error) { 
cosmosException = error; } assertThat(cosmosException).isNotNull(); assertThat(cosmosException.getStatusCode()).isEqualTo(404); } private void assertFeedRange(FeedRange feedRange, String expectedJson) { assertThat(((FeedRangeInternal)feedRange).toJson()) .isNotNull() .isEqualTo(expectedJson); assertThat(feedRange.toString()) .isNotNull() .isEqualTo(Base64.getUrlEncoder().encodeToString(expectedJson.getBytes(StandardCharsets.UTF_8) )); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getNormalizedFeedRanges_HashV1() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); createdDatabase.createContainer(containerProperties, options); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); FeedRange fullRange = FeedRange.forFullRange(); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(fullRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); Range<String> expectedRange = new Range<>("AA", "BB", true, false); FeedRange epkRange = new FeedRangeEpkImpl(expectedRange); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(epkRange).block()) .isNotNull() .isEqualTo(expectedRange); FeedRange pointEpkRange = new FeedRangeEpkImpl( new Range<>("05C1D5AB55AB54", "05C1D5AB55AB54", true, true)); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pointEpkRange).block()) .isNotNull() .isEqualTo(new Range<>("05C1D5AB55AB54", "05C1D5AB55AB55", true, false)); FeedRange pkRangeIdRange = new FeedRangePartitionKeyRangeImpl("0"); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pkRangeIdRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); FeedRange logicalPartitionFeedRange = 
FeedRange.forLogicalPartition(new PartitionKey("Hello World")); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(logicalPartitionFeedRange).block()) .isNotNull() .isEqualTo(new Range<>( "05C1C5D58F13B00849666D6D70215870736D6500", "05C1C5D58F13B00849666D6D70215870736D6501", true, false)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getNormalizedFeedRanges_HashV2() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinitionForHashV2(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); createdDatabase.createContainer(containerProperties, options); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); FeedRange fullRange = FeedRange.forFullRange(); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(fullRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); Range<String> expectedRange = new Range<>("AA", "BB", true, false); FeedRange epkRange = new FeedRangeEpkImpl(expectedRange); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(epkRange).block()) .isNotNull() .isEqualTo(expectedRange); FeedRange pointEpkRange = new FeedRangeEpkImpl( new Range<>("05C1D5AB55AB54", "05C1D5AB55AB54", true, true)); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pointEpkRange).block()) .isNotNull() .isEqualTo(new Range<>("05C1D5AB55AB54", "05C1D5AB55AB55", true, false)); FeedRange pkRangeIdRange = new FeedRangePartitionKeyRangeImpl("0"); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pkRangeIdRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); FeedRange logicalPartitionFeedRange = FeedRange.forLogicalPartition(new PartitionKey("Hello World")); 
assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(logicalPartitionFeedRange).block()) .isNotNull() .isEqualTo(new Range<>( "306C52B42DECB3AE9D3C7586975E30B9", "306C52B42DECB3AE9D3C7586975E30BA", true, false)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges_withMultiplePartitions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(18000)); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(3); assertFeedRange(feedRanges.get(0), "{\"Range\":{\"min\":\"\",\"max\":\"05C1D5AB55AB54\"}}"); assertFeedRange(feedRanges.get(1), "{\"Range\":{\"min\":\"05C1D5AB55AB54\",\"max\":\"05C1E5AB55AB54\"}}"); assertFeedRange(feedRanges.get(2), "{\"Range\":{\"min\":\"05C1E5AB55AB54\",\"max\":\"FF\"}}"); Range<String> firstEpkRange = getEffectiveRange(syncContainer, feedRanges.get(0)); Range<String> secondEpkRange = getEffectiveRange(syncContainer, feedRanges.get(1)); Range<String> thirdEpkRange = getEffectiveRange(syncContainer, feedRanges.get(2)); List<FeedRangeEpkImpl> feedRangesAfterSplit = syncContainer .asyncContainer .trySplitFeedRange(FeedRange.forFullRange(), 3) .block(); assertThat(feedRangesAfterSplit) .isNotNull() .hasSize(3); String leftMin = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMin(); String rightMin = firstEpkRange.getMin(); String leftMax = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMax(); String rightMax = firstEpkRange.getMax(); 
assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).equals(firstEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(1)).equals(secondEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(2)).equals(thirdEpkRange)) .isTrue(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges_withMultiplePartitions_HashV2() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinitionForHashV2(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(18000)); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(3); assertFeedRange( feedRanges.get(0), "{\"Range\":{\"min\":\"\",\"max\":\"15555555555555555555555555555555\"}}"); assertFeedRange( feedRanges.get(1), "{\"Range\":{\"min\":\"15555555555555555555555555555555\"," + "\"max\":\"2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}}"); assertFeedRange( feedRanges.get(2), "{\"Range\":{\"min\":\"2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\",\"max\":\"FF\"}}"); Range<String> firstEpkRange = getEffectiveRange(syncContainer, feedRanges.get(0)); Range<String> secondEpkRange = getEffectiveRange(syncContainer, feedRanges.get(1)); Range<String> thirdEpkRange = getEffectiveRange(syncContainer, feedRanges.get(2)); List<FeedRangeEpkImpl> feedRangesAfterSplit = syncContainer .asyncContainer .trySplitFeedRange(FeedRange.forFullRange(), 3) .block(); assertThat(feedRangesAfterSplit) .isNotNull() .hasSize(3); String leftMin = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMin(); 
String rightMin = firstEpkRange.getMin(); String leftMax = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMax(); String rightMax = firstEpkRange.getMax(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).equals(firstEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(1)).equals(secondEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(2)).equals(thirdEpkRange)) .isTrue(); } private static Range<String> getEffectiveRange(CosmosContainer container, FeedRange feedRange) { AsyncDocumentClient clientWrapper = container.asyncContainer.getDatabase().getDocClientWrapper(); return FeedRangeInternal .convert(feedRange) .getNormalizedEffectiveRange( clientWrapper.getPartitionKeyRangeCache(), null, Mono.just(Utils.ValueHolder.initialize( clientWrapper.getCollectionCache().resolveByNameAsync( null, container.asyncContainer.getLink(), null ).block()))).block(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void deleteContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse deleteResponse = syncContainer.delete(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void deleteContainer_withOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer 
syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse deleteResponse = syncContainer.delete(options); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void replace() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getProperties().getIndexingPolicy().getIndexingMode()).isEqualTo(IndexingMode.CONSISTENT); CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse.getProperties().setIndexingPolicy( new IndexingPolicy().setAutomatic(false).setIndexingMode(IndexingMode.NONE))); assertThat(replaceResponse.getProperties().getIndexingPolicy().getIndexingMode()) .isEqualTo(IndexingMode.NONE); assertThat(replaceResponse.getProperties().getIndexingPolicy().isAutomatic()) .isEqualTo(false); replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse.getProperties().setIndexingPolicy( new IndexingPolicy().setAutomatic(true).setIndexingMode(IndexingMode.CONSISTENT)), options); assertThat(replaceResponse.getProperties().getIndexingPolicy().getIndexingMode()) .isEqualTo(IndexingMode.CONSISTENT); assertThat(replaceResponse.getProperties().getIndexingPolicy().isAutomatic()) .isEqualTo(true); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void enableFullFidelityChangeFeedForExistingContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); 
CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ZERO); CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse .getProperties() .setChangeFeedPolicy( ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(4)))); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ofMinutes(4)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void changeFullFidelityChangeFeedRetentionDurationForExistingContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(3))); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); 
assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ofMinutes(3)); CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse .getProperties() .setChangeFeedPolicy( ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(6)))); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ofMinutes(6)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void readAllContainers() throws Exception{ String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator = createdDatabase.readAllContainers(); assertThat(feedResponseIterator.iterator().hasNext()).isTrue(); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 = createdDatabase.readAllContainers(cosmosQueryRequestOptions); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void queryContainer() throws Exception{ String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse 
containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); String query = String.format("SELECT * from c where c.id = '%s'", collectionName); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator = createdDatabase.queryContainers(query); assertThat(feedResponseIterator.iterator().hasNext()).isTrue(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 = createdDatabase.queryContainers(query, cosmosQueryRequestOptions); assertThat(feedResponseIterator1.iterator().hasNext()).isTrue(); SqlQuerySpec querySpec = new SqlQuerySpec(query); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator2 = createdDatabase.queryContainers(querySpec); assertThat(feedResponseIterator2.iterator().hasNext()).isTrue(); CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator3 = createdDatabase.queryContainers(querySpec, cosmosQueryRequestOptions); assertThat(feedResponseIterator3.iterator().hasNext()).isTrue(); } private void validateContainerResponse(CosmosContainerProperties containerProperties, CosmosContainerResponse createResponse) { assertThat(createResponse.getProperties().getId()).isNotNull(); assertThat(createResponse.getProperties().getId()) .as("check Resource Id") .isEqualTo(containerProperties.getId()); } private void validateContainerResponseWithEncryption(CosmosContainerProperties containerProperties, CosmosContainerResponse createResponse, ClientEncryptionPolicy clientEncryptionPolicy) { validateContainerResponse(containerProperties, createResponse); assertThat(createResponse.getProperties().getClientEncryptionPolicy()).isNotNull(); assertThat(createResponse.getProperties().getClientEncryptionPolicy().getIncludedPaths().size()).isEqualTo(clientEncryptionPolicy.getIncludedPaths().size()); for (ClientEncryptionIncludedPath clientEncryptionIncludedPath 
: createResponse.getProperties().getClientEncryptionPolicy().getIncludedPaths()) { for (ClientEncryptionIncludedPath includedPath : clientEncryptionPolicy.getIncludedPaths()) { if (clientEncryptionIncludedPath.getPath().equals(includedPath.getPath())) { assertThat(clientEncryptionIncludedPath.getClientEncryptionKeyId()).isEqualTo(includedPath.getClientEncryptionKeyId()); assertThat(clientEncryptionIncludedPath.getEncryptionAlgorithm()).isEqualTo(includedPath.getEncryptionAlgorithm()); assertThat(clientEncryptionIncludedPath.getEncryptionType()).isEqualTo(includedPath.getEncryptionType()); break; } } } } private void createEncryptionKey() { EncryptionKeyWrapMetadata encryptionKeyWrapMetadata = new EncryptionKeyWrapMetadata("key1", "tempmetadata1", "custom"); byte[] key = Hex.decode(("34 62 52 77 f9 ee 11 9f 04 8c 6f 50 9c e4 c2 5b b3 39 f4 d0 4d c1 6a 32 fa 2b 3b aa " + "ae 1e d9 1c").replace(" ", "")); CosmosClientEncryptionKeyProperties cosmosClientEncryptionKeyProperties1 = new CosmosClientEncryptionKeyProperties("containerTestKey1", "AEAD_AES_256_CBC_HMAC_SHA256", key, encryptionKeyWrapMetadata); CosmosClientEncryptionKeyProperties cosmosClientEncryptionKeyProperties2 = new CosmosClientEncryptionKeyProperties("containerTestKey2", "AEAD_AES_256_CBC_HMAC_SHA256", key, encryptionKeyWrapMetadata); client.asyncClient().getDatabase(createdDatabase.getId()).createClientEncryptionKey(cosmosClientEncryptionKeyProperties1).block(); client.asyncClient().getDatabase(createdDatabase.getId()).createClientEncryptionKey(cosmosClientEncryptionKeyProperties2).block(); } }
Yes, the order of the partition key paths returned by the service is guaranteed to match the order in which they were defined, so asserting on positional elements (or the full ordered list) is safe.
// Verifies create/read/delete of a container with a MultiHash (hierarchical)
// partition key over /city and /zipcode.
public void crudMultiHashContainer() throws Exception {
    String collectionName = UUID.randomUUID().toString();

    // MultiHash partition keys require PartitionKeyDefinitionVersion.V2.
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH);
    partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    ArrayList<String> paths = new ArrayList<>();
    paths.add("/city");
    paths.add("/zipcode");
    partitionKeyDefinition.setPaths(paths);

    CosmosContainerProperties containerProperties = getContainerDefinition(collectionName, partitionKeyDefinition);

    // Create.
    CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
    validateContainerResponse(containerProperties, containerResponse);
    // BUG FIX: the previous assertThat(a == b) calls were no-ops — AssertJ's
    // assertThat(boolean) never fails without a terminal check such as isTrue() —
    // and == on Strings compares identity, not value. Assert actual vs expected.
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind()).isEqualTo(PartitionKind.MULTI_HASH);
    // List equality covers both the size and the (guaranteed) order of the paths.
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths()).isEqualTo(paths);

    // Read back and verify the definition round-tripped.
    CosmosContainer multiHashContainer = createdDatabase.getContainer(collectionName);
    containerResponse = multiHashContainer.read();
    validateContainerResponse(containerProperties, containerResponse);
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind()).isEqualTo(PartitionKind.MULTI_HASH);
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths()).isEqualTo(paths);

    // Delete.
    CosmosContainerResponse deleteResponse = multiHashContainer.delete();
    assertThat(deleteResponse).isNotNull();
}
assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths().get(0)).isEqualTo(paths.get(0));
// Verifies create/read/delete of a container with a MultiHash (hierarchical)
// partition key over /city and /zipcode.
public void crudMultiHashContainer() throws Exception {
    String collectionName = UUID.randomUUID().toString();

    // MultiHash partition keys require PartitionKeyDefinitionVersion.V2.
    PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition();
    partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH);
    partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2);
    ArrayList<String> paths = new ArrayList<>();
    paths.add("/city");
    paths.add("/zipcode");
    partitionKeyDefinition.setPaths(paths);

    CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName, partitionKeyDefinition);

    // Create.
    CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
    validateContainerResponse(containerProperties, containerResponse);
    // BUG FIX: the previous assertThat(a == b) calls were no-ops — AssertJ's
    // assertThat(boolean) never fails without a terminal check such as isTrue() —
    // and == on Strings compares identity, not value. Assert actual vs expected.
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind()).isEqualTo(PartitionKind.MULTI_HASH);
    // List equality covers both the size and the (guaranteed) order of the paths.
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths()).isEqualTo(paths);

    // Read back and verify the definition round-tripped.
    CosmosContainer multiHashContainer = createdDatabase.getContainer(collectionName);
    containerResponse = multiHashContainer.read();
    validateContainerResponse(containerProperties, containerResponse);
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getKind()).isEqualTo(PartitionKind.MULTI_HASH);
    assertThat(containerResponse.getProperties().getPartitionKeyDefinition().getPaths()).isEqualTo(paths);

    // Delete.
    CosmosContainerResponse deleteResponse = multiHashContainer.delete();
    assertThat(deleteResponse).isNotNull();
}
/**
 * Sync (blocking) client tests covering container CRUD, feed ranges and
 * container queries against the Cosmos emulator.
 */
class CosmosContainerTest extends TestSuiteBase {
    private String preExistingDatabaseId = CosmosDatabaseForTest.generateId();
    private CosmosClient client;
    private CosmosDatabase createdDatabase;

    @Factory(dataProvider = "clientBuilders")
    public CosmosContainerTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    /** Creates the shared sync client and the test database once for the class. */
    @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT)
    public void before_CosmosContainerTest() {
        client = getClientBuilder().buildClient();
        createdDatabase = createSyncDatabase(client, preExistingDatabaseId);
    }

    /** Deletes the test database and closes the client. */
    @AfterClass(groups = {"emulator"}, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        logger.info("starting ....");
        safeDeleteSyncDatabase(createdDatabase);
        safeCloseSyncClient(client);
    }

    /** Creating from explicit properties succeeds and reports a positive request charge. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void createContainer_withProperties() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
        assertThat(containerResponse.getRequestCharge()).isGreaterThan(0);
        validateContainerResponse(containerProperties, containerResponse);
    }

    /** Analytical-store TTL values to exercise: infinite (-1), off (0), finite, unset. */
    @DataProvider
    public static Object[][] analyticalTTLProvider() {
        return new Object[][]{
            {-1},
            {0},
            {10},
            {null}
        };
    }

    // NOTE(review): disabled — presumably requires an analytical-store-enabled
    // account; confirm before re-enabling.
    @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "analyticalTTLProvider", enabled = false)
    public void createContainer_withAnalyticalTTL(Integer analyticalTTL) throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = new CosmosContainerProperties(collectionName, "/id");
        containerProperties.setAnalyticalStoreTimeToLiveInSeconds(analyticalTTL);
        if (analyticalTTL != null && analyticalTTL > 0) {
            // Keep the default TTL strictly below the finite analytical TTL.
            containerProperties.setDefaultTimeToLiveInSeconds(analyticalTTL - 1);
        }

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
        assertThat(containerResponse.getRequestCharge()).isGreaterThan(0);
        validateContainerResponse(containerProperties, containerResponse);
        assertThat(containerResponse.getProperties().getAnalyticalStoreTimeToLiveInSeconds()).isEqualTo(analyticalTTL);
    }

    /** Creating the same container twice yields a 409 CONFLICT. */
    @Test(groups = {"emulator"}, timeOut = TIMEOUT)
    public void createContainer_alreadyExists() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
        validateContainerResponse(containerProperties, containerResponse);
        // NOTE(review): if the second create unexpectedly succeeds, this test still
        // passes — consider failing explicitly after the call.
        try {
            createdDatabase.createContainer(containerProperties);
        } catch (Exception e) {
            assertThat(e).isInstanceOf(CosmosException.class);
            assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT);
        }
    }

    /** Create with manual throughput. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void createContainer_withThroughput() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        int throughput = 1000;

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties,
            ThroughputProperties.createManualThroughput(throughput));
        validateContainerResponse(containerProperties, containerResponse);
    }

    /** Create with request options. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void createContainer_withOptions() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, options);
        validateContainerResponse(containerProperties, containerResponse);
    }

    /** Create with both throughput and request options. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void createContainer_withThroughputAndOptions() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
        int throughput = 1000;

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties,
            throughput, options);
        validateContainerResponse(containerProperties, containerResponse);
    }

    // NOTE(review): method name contains a typo ("PartitoinKey"); kept as-is to
    // avoid changing the public test identifier.
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void createContainer_withNameAndPartitoinKeyPath() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        String partitionKeyPath = "/mypk";

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath);
        validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse);
    }

    /** Create from name + partition key path + throughput. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void createContainer_withNamePartitionPathAndThroughput() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        String partitionKeyPath = "/mypk";
        int throughput = 1000;

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName,
            partitionKeyPath, ThroughputProperties.createManualThroughput(throughput));
        validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse);
    }

    /** read() with and without options returns the created container's properties. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void readContainer() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
        createdDatabase.createContainer(containerProperties);
        CosmosContainer syncContainer = createdDatabase.getContainer(collectionName);

        CosmosContainerResponse read = syncContainer.read();
        validateContainerResponse(containerProperties, read);

        CosmosContainerResponse read1 = syncContainer.read(options);
        validateContainerResponse(containerProperties, read1);
    }

    /** A single-partition container exposes exactly one feed range (pk range "0"). */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void getFeedRanges() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        createdDatabase.createContainer(containerProperties);
        CosmosContainer syncContainer = createdDatabase.getContainer(collectionName);

        List<FeedRange> feedRanges = syncContainer.getFeedRanges();
        assertThat(feedRanges)
            .isNotNull()
            .hasSize(1);
        assertThat(feedRanges.get(0).toJsonString())
            .isNotNull()
            .isEqualTo("{\"PKRangeId\":\"0\"}");
    }

    /** delete() succeeds for an existing container. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void deleteContainer() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        createdDatabase.createContainer(containerProperties);
        CosmosContainer syncContainer = createdDatabase.getContainer(collectionName);

        syncContainer.delete();
    }

    /** delete(options) succeeds for an existing container. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void deleteContainer_withOptions() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
        createdDatabase.createContainer(containerProperties);
        CosmosContainer syncContainer = createdDatabase.getContainer(collectionName);

        syncContainer.delete(options);
    }

    /** Replacing the indexing policy round-trips through replace() with and without options. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void replace() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();

        CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
        validateContainerResponse(containerProperties, containerResponse);
        assertThat(containerResponse.getProperties().getIndexingPolicy().getIndexingMode()).isEqualTo(IndexingMode.CONSISTENT);

        CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId())
            .replace(containerResponse.getProperties().setIndexingPolicy(
                new IndexingPolicy().setIndexingMode(IndexingMode.CONSISTENT)));
        assertThat(replaceResponse.getProperties().getIndexingPolicy().getIndexingMode())
            .isEqualTo(IndexingMode.CONSISTENT);

        CosmosContainerResponse replaceResponse1 = createdDatabase.getContainer(containerProperties.getId())
            .replace(containerResponse.getProperties().setIndexingPolicy(
                new IndexingPolicy().setIndexingMode(IndexingMode.CONSISTENT)), options);
        assertThat(replaceResponse1.getProperties().getIndexingPolicy().getIndexingMode())
            .isEqualTo(IndexingMode.CONSISTENT);
    }

    /** readAllContainers(), with and without options, includes the created container. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void readAllContainers() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        createdDatabase.createContainer(containerProperties);

        CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator = createdDatabase.readAllContainers();
        assertThat(feedResponseIterator.iterator().hasNext()).isTrue();

        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 =
            createdDatabase.readAllContainers(cosmosQueryRequestOptions);
        assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
    }

    // BUG FIX: the @Test annotation was duplicated here, which does not compile
    // (TestNG's @Test is not a repeatable annotation).
    /** queryContainers() finds the created container via all four query overloads. */
    @Test(groups = { "emulator" }, timeOut = TIMEOUT)
    public void queryContainer() throws Exception {
        String collectionName = UUID.randomUUID().toString();
        CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
        createdDatabase.createContainer(containerProperties);

        String query = String.format("SELECT * from c where c.id = '%s'", collectionName);
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();

        // Raw query string only.
        CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator =
            createdDatabase.queryContainers(query);
        assertThat(feedResponseIterator.iterator().hasNext()).isTrue();

        // Query string + request options.
        CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 =
            createdDatabase.queryContainers(query, cosmosQueryRequestOptions);
        assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

        // SqlQuerySpec only.
        SqlQuerySpec querySpec = new SqlQuerySpec(query);
        CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator2 =
            createdDatabase.queryContainers(querySpec);
        assertThat(feedResponseIterator2.iterator().hasNext()).isTrue();

        // SqlQuerySpec + request options.
        CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator3 =
            createdDatabase.queryContainers(querySpec, cosmosQueryRequestOptions);
        assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
    }

    /** Asserts that the create/read response echoes the id of the submitted properties. */
    private void validateContainerResponse(CosmosContainerProperties containerProperties,
                                           CosmosContainerResponse createResponse) {
        assertThat(createResponse.getProperties().getId()).isNotNull();
        assertThat(createResponse.getProperties().getId())
            .as("check Resource Id")
            .isEqualTo(containerProperties.getId());
    }
}
class CosmosContainerTest extends TestSuiteBase { private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); private CosmosClient client; private CosmosDatabase createdDatabase; private CosmosContainer createdContainer; @Factory(dataProvider = "clientBuilders") public CosmosContainerTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosContainerTest() { client = getClientBuilder().buildClient(); createdDatabase = createSyncDatabase(client, preExistingDatabaseId); createEncryptionKey(); } @AfterClass(groups = {"emulator"}, timeOut = 3 * SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting ...."); safeDeleteSyncDatabase(createdDatabase); safeCloseSyncClient(client); } @BeforeMethod(groups = { "emulator" }) public void beforeTest() throws Exception { this.createdContainer = null; } @AfterMethod(groups = { "emulator" }) public void afterTest() throws Exception { if (this.createdContainer != null) { try { this.createdContainer.delete(); } catch (CosmosException error) { if (error.getStatusCode() != 404) { throw error; } } } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withProperties() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withEncryption() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); 
ClientEncryptionIncludedPath path1 = new ClientEncryptionIncludedPath(); path1.setPath("/path1"); path1.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path1.setEncryptionType("Randomized"); path1.setClientEncryptionKeyId("containerTestKey1"); ClientEncryptionIncludedPath path2 = new ClientEncryptionIncludedPath(); path2.setPath("/path2"); path2.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path2.setEncryptionType("Deterministic"); path2.setClientEncryptionKeyId("containerTestKey2"); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(path1); paths.add(path2); ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(paths); containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponseWithEncryption(containerProperties, containerResponse, clientEncryptionPolicy); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void createContainer_withPartitionKeyInEncryption() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); ClientEncryptionIncludedPath path1 = new ClientEncryptionIncludedPath(); path1.setPath("/mypk"); path1.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path1.setEncryptionType("Randomized"); path1.setClientEncryptionKeyId("containerTestKey1"); ClientEncryptionIncludedPath path2 = new ClientEncryptionIncludedPath(); path2.setPath("/path2"); path2.setEncryptionAlgorithm("AEAD_AES_256_CBC_HMAC_SHA256"); path2.setEncryptionType("Deterministic"); path2.setClientEncryptionKeyId("containerTestKey2"); List<ClientEncryptionIncludedPath> paths = new ArrayList<>(); paths.add(path1); paths.add(path2); ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(paths); CosmosContainerResponse 
containerResponse = null; try { containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); containerResponse = createdDatabase.createContainer(containerProperties); fail("createContainer should fail as mypk which is part of the partition key cannot be included in the " + "ClientEncryptionPolicy."); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).isEqualTo("Path mypk which is part of the partition key cannot be included in" + " the ClientEncryptionPolicy."); } collectionName = UUID.randomUUID().toString(); containerProperties = new CosmosContainerProperties(collectionName, "/mypk/mypk1"); try { containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); containerResponse = createdDatabase.createContainer(containerProperties); fail("createContainer should fail as mypk which is part of the partition key cannot be included in the " + "ClientEncryptionPolicy."); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).isEqualTo("Path mypk which is part of the partition key cannot be included in" + " the ClientEncryptionPolicy."); } collectionName = UUID.randomUUID().toString(); containerProperties = new CosmosContainerProperties(collectionName, "/differentKey"); try { containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); List<String> keyPaths = new ArrayList<>(); keyPaths.add("/mypk"); partitionKeyDefinition.setPaths(keyPaths); containerProperties.setPartitionKeyDefinition(partitionKeyDefinition); containerResponse = createdDatabase.createContainer(containerProperties); fail("createContainer should fail as mypk which is part of the partition key cannot be included in the " + "ClientEncryptionPolicy."); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage()).isEqualTo("Path mypk which is part of the partition key cannot be included in" + " the ClientEncryptionPolicy."); } collectionName = 
UUID.randomUUID().toString(); containerProperties = new CosmosContainerProperties(collectionName, "/mypk1/mypk"); containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); containerResponse = createdDatabase.createContainer(containerProperties); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponseWithEncryption(containerProperties, containerResponse, clientEncryptionPolicy); } @DataProvider public static Object[][] analyticalTTLProvider() { return new Object[][]{ {-1}, {0}, {10}, {null} }; } @Test(groups = { "emulator" }, timeOut = TIMEOUT, dataProvider = "analyticalTTLProvider", enabled = false) public void createContainer_withAnalyticalTTL(Integer analyticalTTL) throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = new CosmosContainerProperties(collectionName, "/id"); containerProperties.setAnalyticalStoreTimeToLiveInSeconds(analyticalTTL); if (analyticalTTL != null && analyticalTTL > 0) { containerProperties.setDefaultTimeToLiveInSeconds(analyticalTTL - 1); } CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getRequestCharge()).isGreaterThan(0); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties().getAnalyticalStoreTimeToLiveInSeconds()).isEqualTo(analyticalTTL); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void createContainer_alreadyExists() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, 
containerResponse); try { createdDatabase.createContainer(containerProperties); } catch (Exception e) { assertThat(e).isInstanceOf(CosmosException.class); assertThat(((CosmosException) e).getStatusCode()).isEqualTo(HttpConstants.StatusCodes.CONFLICT); } } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withThroughput() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withFullFidelityChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy( ChangeFeedPolicy.createFullFidelityPolicy( Duration.ofMinutes(8))); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ofMinutes(8)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withIncrementalChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); 
CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createIncrementalPolicy()); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ZERO); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withDefaultChangeFeedPolicy() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ZERO); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = 
createdDatabase.createContainer(containerProperties, options); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withThroughputAndOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties, throughput, options); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withNameAndPartitionKeyPath() throws Exception { String collectionName = UUID.randomUUID().toString(); String partitionKeyPath = "/mypk"; CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void createContainer_withNamePartitionPathAndThroughput() throws Exception { String collectionName = UUID.randomUUID().toString(); String partitionKeyPath = "/mypk"; int throughput = 1000; CosmosContainerResponse containerResponse = createdDatabase.createContainer(collectionName, partitionKeyPath, ThroughputProperties.createManualThroughput(throughput)); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(new CosmosContainerProperties(collectionName, partitionKeyPath), containerResponse); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void 
// NOTE(review): collapsed single-line test methods from a Cosmos container test class (emulator group).
// Only comments were added below; the code is unchanged. The first method's signature is truncated upstream.
readContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse read = syncContainer.read(); validateContainerResponse(containerProperties, read); CosmosContainerResponse read1 = syncContainer.read(options); validateContainerResponse(containerProperties, read1); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(1); assertFeedRange(feedRanges.get(0), "{\"Range\":{\"min\":\"\",\"max\":\"FF\"}}"); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void trySplitRanges_for_NonExistingContainer() throws Exception { CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosAsyncContainer nonExistingContainer = createdDatabase.getContainer("NonExistingContainer").asyncContainer; CosmosException cosmosException = null; try { List<FeedRangeEpkImpl> splitFeedRanges = nonExistingContainer.trySplitFeedRange( FeedRange.forFullRange(), 3 ).block(); } catch (CosmosException error) {
// Expected path: splitting a feed range on a missing container must raise the 404 asserted below.
cosmosException = error; } assertThat(cosmosException).isNotNull(); assertThat(cosmosException.getStatusCode()).isEqualTo(404); } private void assertFeedRange(FeedRange feedRange, String expectedJson) { assertThat(((FeedRangeInternal)feedRange).toJson()) .isNotNull() .isEqualTo(expectedJson); assertThat(feedRange.toString()) .isNotNull() .isEqualTo(Base64.getUrlEncoder().encodeToString(expectedJson.getBytes(StandardCharsets.UTF_8) )); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getNormalizedFeedRanges_HashV1() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); createdDatabase.createContainer(containerProperties, options); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); FeedRange fullRange = FeedRange.forFullRange(); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(fullRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); Range<String> expectedRange = new Range<>("AA", "BB", true, false); FeedRange epkRange = new FeedRangeEpkImpl(expectedRange); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(epkRange).block()) .isNotNull() .isEqualTo(expectedRange); FeedRange pointEpkRange = new FeedRangeEpkImpl( new Range<>("05C1D5AB55AB54", "05C1D5AB55AB54", true, true)); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pointEpkRange).block()) .isNotNull() .isEqualTo(new Range<>("05C1D5AB55AB54", "05C1D5AB55AB55", true, false)); FeedRange pkRangeIdRange = new FeedRangePartitionKeyRangeImpl("0"); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pkRangeIdRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); FeedRange logicalPartitionFeedRange =
// HashV1 container: the logical-partition effective keys asserted just below are 40 hex chars long.
FeedRange.forLogicalPartition(new PartitionKey("Hello World")); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(logicalPartitionFeedRange).block()) .isNotNull() .isEqualTo(new Range<>( "05C1C5D58F13B00849666D6D70215870736D6500", "05C1C5D58F13B00849666D6D70215870736D6501", true, false)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getNormalizedFeedRanges_HashV2() { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinitionForHashV2(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); createdDatabase.createContainer(containerProperties, options); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); FeedRange fullRange = FeedRange.forFullRange(); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(fullRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); Range<String> expectedRange = new Range<>("AA", "BB", true, false); FeedRange epkRange = new FeedRangeEpkImpl(expectedRange); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(epkRange).block()) .isNotNull() .isEqualTo(expectedRange); FeedRange pointEpkRange = new FeedRangeEpkImpl( new Range<>("05C1D5AB55AB54", "05C1D5AB55AB54", true, true)); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pointEpkRange).block()) .isNotNull() .isEqualTo(new Range<>("05C1D5AB55AB54", "05C1D5AB55AB55", true, false)); FeedRange pkRangeIdRange = new FeedRangePartitionKeyRangeImpl("0"); assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(pkRangeIdRange).block()) .isNotNull() .isEqualTo(new Range<>("", "FF", true, false)); FeedRange logicalPartitionFeedRange = FeedRange.forLogicalPartition(new PartitionKey("Hello World"));
// HashV2 container: the logical-partition effective keys asserted just below are 32 hex chars long.
assertThat(syncContainer.asyncContainer.getNormalizedEffectiveRange(logicalPartitionFeedRange).block()) .isNotNull() .isEqualTo(new Range<>( "306C52B42DECB3AE9D3C7586975E30B9", "306C52B42DECB3AE9D3C7586975E30BA", true, false)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges_withMultiplePartitions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(18000)); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(3); assertFeedRange(feedRanges.get(0), "{\"Range\":{\"min\":\"\",\"max\":\"05C1D5AB55AB54\"}}"); assertFeedRange(feedRanges.get(1), "{\"Range\":{\"min\":\"05C1D5AB55AB54\",\"max\":\"05C1E5AB55AB54\"}}"); assertFeedRange(feedRanges.get(2), "{\"Range\":{\"min\":\"05C1E5AB55AB54\",\"max\":\"FF\"}}"); Range<String> firstEpkRange = getEffectiveRange(syncContainer, feedRanges.get(0)); Range<String> secondEpkRange = getEffectiveRange(syncContainer, feedRanges.get(1)); Range<String> thirdEpkRange = getEffectiveRange(syncContainer, feedRanges.get(2)); List<FeedRangeEpkImpl> feedRangesAfterSplit = syncContainer .asyncContainer .trySplitFeedRange(FeedRange.forFullRange(), 3) .block(); assertThat(feedRangesAfterSplit) .isNotNull() .hasSize(3); String leftMin = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMin(); String rightMin = firstEpkRange.getMin(); String leftMax = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMax(); String rightMax = firstEpkRange.getMax();
// NOTE(review): leftMin/rightMin/leftMax/rightMax above are computed but never asserted — debugging leftovers?
assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).equals(firstEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(1)).equals(secondEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(2)).equals(thirdEpkRange)) .isTrue(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void getFeedRanges_withMultiplePartitions_HashV2() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinitionForHashV2(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(18000)); this.createdContainer = createdDatabase.getContainer(collectionName); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); List<FeedRange> feedRanges = syncContainer.getFeedRanges(); assertThat(feedRanges) .isNotNull() .hasSize(3); assertFeedRange( feedRanges.get(0), "{\"Range\":{\"min\":\"\",\"max\":\"15555555555555555555555555555555\"}}"); assertFeedRange( feedRanges.get(1), "{\"Range\":{\"min\":\"15555555555555555555555555555555\"," + "\"max\":\"2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\"}}"); assertFeedRange( feedRanges.get(2), "{\"Range\":{\"min\":\"2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\",\"max\":\"FF\"}}"); Range<String> firstEpkRange = getEffectiveRange(syncContainer, feedRanges.get(0)); Range<String> secondEpkRange = getEffectiveRange(syncContainer, feedRanges.get(1)); Range<String> thirdEpkRange = getEffectiveRange(syncContainer, feedRanges.get(2)); List<FeedRangeEpkImpl> feedRangesAfterSplit = syncContainer .asyncContainer .trySplitFeedRange(FeedRange.forFullRange(), 3) .block(); assertThat(feedRangesAfterSplit) .isNotNull() .hasSize(3); String leftMin = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMin();
String rightMin = firstEpkRange.getMin(); String leftMax = getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).getMax(); String rightMax = firstEpkRange.getMax(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(0)).equals(firstEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(1)).equals(secondEpkRange)) .isTrue(); assertThat(getEffectiveRange(syncContainer, feedRangesAfterSplit.get(2)).equals(thirdEpkRange)) .isTrue(); } private static Range<String> getEffectiveRange(CosmosContainer container, FeedRange feedRange) { AsyncDocumentClient clientWrapper = container.asyncContainer.getDatabase().getDocClientWrapper(); return FeedRangeInternal .convert(feedRange) .getNormalizedEffectiveRange( clientWrapper.getPartitionKeyRangeCache(), null, Mono.just(Utils.ValueHolder.initialize( clientWrapper.getCollectionCache().resolveByNameAsync( null, container.asyncContainer.getLink(), null ).block()))).block(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void deleteContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse deleteResponse = syncContainer.delete(); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void deleteContainer_withOptions() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); CosmosContainer
syncContainer = createdDatabase.getContainer(collectionName); CosmosContainerResponse deleteResponse = syncContainer.delete(options); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void replace() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getProperties().getIndexingPolicy().getIndexingMode()).isEqualTo(IndexingMode.CONSISTENT); CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse.getProperties().setIndexingPolicy( new IndexingPolicy().setAutomatic(false).setIndexingMode(IndexingMode.NONE))); assertThat(replaceResponse.getProperties().getIndexingPolicy().getIndexingMode()) .isEqualTo(IndexingMode.NONE); assertThat(replaceResponse.getProperties().getIndexingPolicy().isAutomatic()) .isEqualTo(false); replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse.getProperties().setIndexingPolicy( new IndexingPolicy().setAutomatic(true).setIndexingMode(IndexingMode.CONSISTENT)), options); assertThat(replaceResponse.getProperties().getIndexingPolicy().getIndexingMode()) .isEqualTo(IndexingMode.CONSISTENT); assertThat(replaceResponse.getProperties().getIndexingPolicy().isAutomatic()) .isEqualTo(true); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void enableFullFidelityChangeFeedForExistingContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
// A container created without an explicit change-feed policy is asserted below to have Duration.ZERO retention.
CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); validateContainerResponse(containerProperties, containerResponse); this.createdContainer = createdDatabase.getContainer(collectionName); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ZERO); CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId()) .replace(containerResponse .getProperties() .setChangeFeedPolicy( ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(4)))); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration()) .isEqualTo(Duration.ofMinutes(4)); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) public void changeFullFidelityChangeFeedRetentionDurationForExistingContainer() throws Exception { String collectionName = UUID.randomUUID().toString(); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName); containerProperties.setChangeFeedPolicy(ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(3))); CosmosContainerRequestOptions options = new CosmosContainerRequestOptions(); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties); this.createdContainer = createdDatabase.getContainer(collectionName); validateContainerResponse(containerProperties, containerResponse); assertThat(containerResponse.getProperties()).isNotNull(); assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull();
assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration())
    .isEqualTo(Duration.ofMinutes(3));

CosmosContainerResponse replaceResponse = createdDatabase.getContainer(containerProperties.getId())
    .replace(containerResponse
        .getProperties()
        .setChangeFeedPolicy(
            ChangeFeedPolicy.createFullFidelityPolicy(Duration.ofMinutes(6))));

// NOTE(review): these assertions re-check the request's (mutated) properties instance rather than
// replaceResponse — setChangeFeedPolicy above mutated the shared properties object; confirm intended.
assertThat(containerResponse.getProperties()).isNotNull();
assertThat(containerResponse.getProperties().getChangeFeedPolicy()).isNotNull();
assertThat(containerResponse.getProperties().getChangeFeedPolicy().getFullFidelityRetentionDuration())
    .isEqualTo(Duration.ofMinutes(6));
}

/**
 * Verifies both {@code readAllContainers} overloads return a non-empty iterable once a container
 * exists in the database.
 */
@Test(groups = { "emulator" }, timeOut = TIMEOUT)
public void readAllContainers() throws Exception {
    String collectionName = UUID.randomUUID().toString();
    CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
    CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
    CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
    this.createdContainer = createdDatabase.getContainer(collectionName);

    CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator =
        createdDatabase.readAllContainers();
    assertThat(feedResponseIterator.iterator().hasNext()).isTrue();

    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 =
        createdDatabase.readAllContainers(cosmosQueryRequestOptions);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();
}

/**
 * Verifies all four {@code queryContainers} overloads (String / SqlQuerySpec, with and without
 * query request options) find the container created by this test.
 */
// BUG FIX: the @Test annotation was duplicated here; TestNG's @Test is not @Repeatable, so a
// duplicate annotation is a compile error. Exactly one annotation is kept.
@Test(groups = { "emulator" }, timeOut = TIMEOUT)
public void queryContainer() throws Exception {
    String collectionName = UUID.randomUUID().toString();
    CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName);
    CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
    CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties);
    this.createdContainer = createdDatabase.getContainer(collectionName);

    String query = String.format("SELECT * from c where c.id = '%s'", collectionName);
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();

    CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator =
        createdDatabase.queryContainers(query);
    assertThat(feedResponseIterator.iterator().hasNext()).isTrue();

    CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator1 =
        createdDatabase.queryContainers(query, cosmosQueryRequestOptions);
    assertThat(feedResponseIterator1.iterator().hasNext()).isTrue();

    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator2 =
        createdDatabase.queryContainers(querySpec);
    assertThat(feedResponseIterator2.iterator().hasNext()).isTrue();

    CosmosPagedIterable<CosmosContainerProperties> feedResponseIterator3 =
        createdDatabase.queryContainers(querySpec, cosmosQueryRequestOptions);
    assertThat(feedResponseIterator3.iterator().hasNext()).isTrue();
}

/** Asserts the created/read container response carries the expected resource id. */
private void validateContainerResponse(CosmosContainerProperties containerProperties,
                                       CosmosContainerResponse createResponse) {
    assertThat(createResponse.getProperties().getId()).isNotNull();
    assertThat(createResponse.getProperties().getId())
        .as("check Resource Id")
        .isEqualTo(containerProperties.getId());
}

/**
 * Asserts the response id plus the client-encryption policy: each included path in the response is
 * matched by path to the expected policy and compared on key id, algorithm and encryption type.
 */
private void validateContainerResponseWithEncryption(CosmosContainerProperties containerProperties,
                                                     CosmosContainerResponse createResponse,
                                                     ClientEncryptionPolicy clientEncryptionPolicy) {
    validateContainerResponse(containerProperties, createResponse);
    assertThat(createResponse.getProperties().getClientEncryptionPolicy()).isNotNull();
    assertThat(createResponse.getProperties().getClientEncryptionPolicy().getIncludedPaths().size())
        .isEqualTo(clientEncryptionPolicy.getIncludedPaths().size());
    for (ClientEncryptionIncludedPath clientEncryptionIncludedPath
        : createResponse.getProperties().getClientEncryptionPolicy().getIncludedPaths()) {
        for (ClientEncryptionIncludedPath includedPath : clientEncryptionPolicy.getIncludedPaths()) {
            if (clientEncryptionIncludedPath.getPath().equals(includedPath.getPath())) {
                assertThat(clientEncryptionIncludedPath.getClientEncryptionKeyId())
                    .isEqualTo(includedPath.getClientEncryptionKeyId());
                assertThat(clientEncryptionIncludedPath.getEncryptionAlgorithm())
                    .isEqualTo(includedPath.getEncryptionAlgorithm());
                assertThat(clientEncryptionIncludedPath.getEncryptionType())
                    .isEqualTo(includedPath.getEncryptionType());
                break;
            }
        }
    }
}

/** Registers two client-encryption keys (same 256-bit key material, different key ids). */
private void createEncryptionKey() {
    EncryptionKeyWrapMetadata encryptionKeyWrapMetadata =
        new EncryptionKeyWrapMetadata("key1", "tempmetadata1", "custom");
    byte[] key = Hex.decode(("34 62 52 77 f9 ee 11 9f 04 8c 6f 50 9c e4 c2 5b b3 39 f4 d0 4d c1 6a 32 fa 2b 3b aa "
        + "ae 1e d9 1c").replace(" ", ""));
    CosmosClientEncryptionKeyProperties cosmosClientEncryptionKeyProperties1 =
        new CosmosClientEncryptionKeyProperties("containerTestKey1", "AEAD_AES_256_CBC_HMAC_SHA256",
            key, encryptionKeyWrapMetadata);
    CosmosClientEncryptionKeyProperties cosmosClientEncryptionKeyProperties2 =
        new CosmosClientEncryptionKeyProperties("containerTestKey2", "AEAD_AES_256_CBC_HMAC_SHA256",
            key, encryptionKeyWrapMetadata);
    client.asyncClient().getDatabase(createdDatabase.getId())
        .createClientEncryptionKey(cosmosClientEncryptionKeyProperties1).block();
    client.asyncClient().getDatabase(createdDatabase.getId())
        .createClientEncryptionKey(cosmosClientEncryptionKeyProperties2).block();
}
}
This constructor expects an already URL-encoded path, so the caller encodes the file name before passing it.
/**
 * Creates a new {@link DataLakeFileAsyncClient} addressing the file named {@code fileName} beneath
 * this path, reusing this client's pipeline, service version, account name and file-system name.
 *
 * @param fileName the name of the file; must not be {@code null}. The value is url-decoded and
 *        re-encoded so the resulting URL and path are encoded exactly once.
 * @return a new {@link DataLakeFileAsyncClient} for the child file.
 * @throws NullPointerException if {@code fileName} is {@code null}.
 */
public DataLakeFileAsyncClient getFileAsyncClient(String fileName) {
    Objects.requireNonNull(fileName, "'fileName' can not be set to null");

    // Normalize to a singly-encoded name: decode whatever the caller passed, then encode once.
    String rawName = Utility.urlDecode(fileName);
    String encodedName = Utility.urlEncode(rawName);

    String fileUrl = StorageImplUtils.appendToUrlPath(getPathUrl(), encodedName).toString();
    String encodedPath = Utility.urlEncode(getObjectPath() + "/" + rawName);

    BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient();
    return new DataLakeFileAsyncClient(getHttpPipeline(), fileUrl, getServiceVersion(), getAccountName(),
        getFileSystemName(), encodedPath, blockBlobAsyncClient);
}
getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(getObjectPath() + "/"
/**
 * Creates a new {@link DataLakeFileAsyncClient} for {@code fileName} under this path, sharing this
 * client's pipeline, service version, account name and file-system name. The name is url-decoded
 * and re-encoded so the resulting URL and object path are encoded exactly once.
 *
 * NOTE(review): this definition is byte-identical to an earlier copy of getFileAsyncClient in this
 * file — likely a duplicated row/merge artifact; confirm only one copy belongs in the real source.
 *
 * @param fileName the name of the file; must not be {@code null}.
 * @return a new {@link DataLakeFileAsyncClient} for the child file.
 */
public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { Objects.requireNonNull(fileName, "'fileName' can not be set to null"); BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient(); return new DataLakeFileAsyncClient(getHttpPipeline(), StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(fileName))).toString(), getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(getObjectPath() + "/" + Utility.urlDecode(fileName)), blockBlobAsyncClient); }
class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient { private final ClientLogger logger = new ClientLogger(DataLakeDirectoryAsyncClient.class); /** * Package-private constructor for use by {@link DataLakePathClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param fileSystemName The file system name. * @param directoryName The directory name. * @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient} */ DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName, String fileSystemName, String directoryName, BlockBlobAsyncClient blockBlobAsyncClient) { super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName, PathResourceType.DIRECTORY, blockBlobAsyncClient); } DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) { super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getPathUrl(), dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(), dataLakePathAsyncClient.getFileSystemName(), dataLakePathAsyncClient.pathName, PathResourceType.DIRECTORY, dataLakePathAsyncClient.getBlockBlobAsyncClient()); } /** * Gets the URL of the directory represented by this client on the Data Lake service. * * @return the URL. */ public String getDirectoryUrl() { return getPathUrl(); } /** * Gets the path of this directory, not including the name of the resource itself. * * @return The path of the directory. */ public String getDirectoryPath() { return getObjectPath(); } /** * Gets the name of this directory, not including its full path. * * @return The name of the directory. */ public String getDirectoryName() { return getObjectName(); } /** * Deletes a directory. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(false, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a directory. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param recursive Whether or not to delete all paths beneath the directory. * @param requestConditions {@link DataLakeRequestConditions} * * @return A reactive response signalling completion. */ public Mono<Response<Void>> deleteWithResponse(boolean recursive, DataLakeRequestConditions requestConditions) { try { return withContext(context -> deleteWithResponse(recursive, requestConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new DataLakeFileAsyncClient object by concatenating fileName to the end of * DataLakeDirectoryAsyncClient's URL. The new DataLakeFileAsyncClient uses the same request policy pipeline as the * DataLakeDirectoryAsyncClient. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient * * @param fileName A {@code String} representing the name of the file. * @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this * file system. */ /** * Creates a new file within a directory. By default this method will not overwrite an existing file. * For more information, see the * <a href="https: * Docs</a>. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * * @param fileName Name of the file to create. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ public Mono<DataLakeFileAsyncClient> createFile(String fileName) { return createFile(fileName, false); } /** * Creates a new file within a directory. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile * * @param fileName Name of the file to create. * @param overwrite Whether or not to overwrite, should the file exist. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. */ public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } try { return createFileWithResponse(fileName, null, null, null, null, requestConditions) .flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file within a directory. If a file with the same name already exists, the file will be * overwritten. For more information, see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse * * @param fileName Name of the file to create. * @param permissions POSIX access permissions for the file owner, the file owning group, and others. * @param umask Restricts permissions of the file to be created. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the file. 
If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeFileAsyncClient} used to interact with the file created. */ public Mono<Response<DataLakeFileAsyncClient>> createFileWithResponse(String fileName, String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { try { DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName); return dataLakeFileAsyncClient.createWithResponse(permissions, umask, headers, metadata, requestConditions) .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified file in the file system. If the file doesn't exist the operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile * * @param fileName Name of the file to delete. * @return A reactive response signalling completion. */ public Mono<Void> deleteFile(String fileName) { try { return deleteFileWithResponse(fileName, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes the specified file in the directory. If the file doesn't exist the operation fails. * For more information see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse * * @param fileName Name of the file to delete. 
* @param requestConditions {@link DataLakeRequestConditions} * @return A {@link Mono} containing containing status code and HTTP headers */ public Mono<Response<Void>> deleteFileWithResponse(String fileName, DataLakeRequestConditions requestConditions) { try { return getFileAsyncClient(fileName).deleteWithResponse(requestConditions); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new DataLakeDirectoryAsyncClient object by concatenating subdirectoryName to the end of * DataLakeDirectoryAsyncClient's URL. The new DataLakeDirectoryAsyncClient uses the same request policy pipeline * as the DataLakeDirectoryAsyncClient. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient * * @param subdirectoryName A {@code String} representing the name of the sub-directory. * @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name * in this file system. */ public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirectoryName) { Objects.requireNonNull(subdirectoryName, "'subdirectoryName' can not be set to null"); BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subdirectoryName) .buildBlockBlobAsyncClient(); return new DataLakeDirectoryAsyncClient(getHttpPipeline(), StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(subdirectoryName))) .toString(), getServiceVersion(), getAccountName(), getFileSystemName(), Utility.urlEncode(getObjectPath() + "/" + Utility.urlDecode(subdirectoryName)), blockBlobAsyncClient); } /** * Creates a new sub-directory within a directory. By default this method will not overwrite an existing * sub-directory. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * * @param subdirectoryName Name of the sub-directory to create. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory * created. */ public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName) { return createSubdirectory(subdirectoryName, false); } /** * Creates a new sub-directory within a directory. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory * * @param subdirectoryName Name of the sub-directory to create. * @param overwrite Whether or not to overwrite, should the sub directory exist. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory * created. */ public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName, boolean overwrite) { DataLakeRequestConditions requestConditions = new DataLakeRequestConditions(); if (!overwrite) { requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } try { return createSubdirectoryWithResponse(subdirectoryName, null, null, null, null, requestConditions) .flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the * sub-directory will be overwritten. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse * * @param subdirectoryName Name of the sub-directory to create. 
 * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and
 * others.
 * @param umask Restricts permissions of the sub-directory to be created.
 * @param headers {@link PathHttpHeaders}
 * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
 * metadata key or value, it must be removed or encoded.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @return A {@link Mono} containing a {@link Response} whose value is a {@link DataLakeDirectoryAsyncClient}
 * used to interact with the sub-directory created.
 */
public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryWithResponse(String subdirectoryName,
    String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata,
    DataLakeRequestConditions requestConditions) {
    try {
        DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient = getSubdirectoryAsyncClient(subdirectoryName);
        // Reuse the sub-directory client's create, then hand that client back to the caller.
        return dataLakeDirectoryAsyncClient.createWithResponse(permissions, umask, headers, metadata,
            requestConditions).map(response -> new SimpleResponse<>(response, dataLakeDirectoryAsyncClient));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono rather than throwing.
        return monoError(logger, ex);
    }
}

/**
 * Deletes the named sub-directory (non-recursive). Fails if it does not exist or is not empty.
 *
 * @param subdirectoryName Name of the sub-directory to delete.
 * @return A reactive response signalling completion.
 */
public Mono<Void> deleteSubdirectory(String subdirectoryName) {
    try {
        return deleteSubdirectoryWithResponse(subdirectoryName, false, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Deletes the named sub-directory. Fails if it does not exist, or is not empty and
 * {@code recursive} is false.
 *
 * @param directoryName Name of the sub-directory to delete.
 * @param recursive Whether or not to delete all paths beneath the sub-directory.
 * @param requestConditions {@link DataLakeRequestConditions}
 * @return A {@link Mono} containing status code and HTTP headers.
 */
public Mono<Response<Void>> deleteSubdirectoryWithResponse(String directoryName, boolean recursive,
    DataLakeRequestConditions requestConditions) {
    try {
        return getSubdirectoryAsyncClient(directoryName).deleteWithResponse(recursive, requestConditions);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Moves the directory to another location within the file system.
 *
 * @param destinationFileSystem The file system of the destination within the account;
 * {@code null} for the current file system.
 * @param destinationPath Relative path from the file system to rename the directory to, excluding the
 * file system name (e.g. to move "mydir/mysubdir" to "newdir", pass "newdir").
 * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} for the new directory.
 */
public Mono<DataLakeDirectoryAsyncClient> rename(String destinationFileSystem, String destinationPath) {
    try {
        return renameWithResponse(destinationFileSystem, destinationPath, null, null).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Moves the directory to another location within the file system, with access conditions on both ends.
 *
 * @param destinationFileSystem The file system of the destination within the account;
 * {@code null} for the current file system.
 * @param destinationPath Relative path (excluding the file system name) to rename the directory to.
 * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
 * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
 * @return A {@link Mono} containing a {@link Response} whose value is a
 * {@link DataLakeDirectoryAsyncClient} for the renamed directory.
 */
public Mono<Response<DataLakeDirectoryAsyncClient>> renameWithResponse(String destinationFileSystem,
    String destinationPath, DataLakeRequestConditions sourceRequestConditions,
    DataLakeRequestConditions destinationRequestConditions) {
    try {
        // The context-aware overload returns a path client; wrap it back into a directory client.
        return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath,
            sourceRequestConditions, destinationRequestConditions, context)).map(
                response -> new SimpleResponse<>(response,
                    new DataLakeDirectoryAsyncClient(response.getValue())));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Lazily emits the immediate children of this directory (non-recursive).
 *
 * @return A reactive response emitting the list of files/directories.
 */
public PagedFlux<PathItem> listPaths() {
    return this.listPaths(false, false, null);
}

/**
 * Lazily emits the files/directories under this directory.
 *
 * @param recursive Specifies if the call should recursively include all paths.
 * @param userPrincipleNameReturned If "true", identity values in the x-ms-owner, x-ms-group and x-ms-acl
 * response headers are transformed from Azure Active Directory Object IDs to User Principal Names;
 * if "false" they are returned as Object IDs. Defaults to false. Group and application Object IDs are
 * not translated because they lack unique friendly names.
 * @param maxResults Maximum number of items per page; the service caps this at 5,000.
 * @return A reactive response emitting the list of files/directories.
 */
public PagedFlux<PathItem> listPaths(boolean recursive, boolean userPrincipleNameReturned,
    Integer maxResults) {
    try {
        return listPathsWithOptionalTimeout(recursive, userPrincipleNameReturned, maxResults, null);
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

// Package-private variant letting the sync client impose a per-call timeout on each page fetch.
PagedFlux<PathItem> listPathsWithOptionalTimeout(boolean recursive, boolean userPrincipleNameReturned,
    Integer maxResults, Duration timeout) {
    // One function serves as both first-page supplier (marker == null) and next-page fetcher.
    Function<String, Mono<PagedResponse<Path>>> func = marker ->
        listPathsSegment(marker, recursive, userPrincipleNameReturned, maxResults, timeout)
            .map(response -> new PagedResponseBase<>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                response.getValue().getPaths(),
                response.getDeserializedHeaders().getContinuation(),
                response.getDeserializedHeaders()));
    // Convert generated-layer Path items into the public PathItem model.
    return new PagedFlux<>(() -> func.apply(null), func).mapPage(Transforms::toPathItem);
}

// Single page fetch against the generated file-system layer, with an optional timeout.
private Mono<FileSystemsListPathsResponse> listPathsSegment(String marker, boolean recursive,
    boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) {
    return StorageImplUtils.applyOptionalTimeout(
        this.fileSystemDataLakeStorage.fileSystems().listPathsWithRestResponseAsync(
            recursive, marker, getDirectoryPath(), maxResults, userPrincipleNameReturned, null,
            null, Context.NONE), timeout);
}

/**
 * Prepares a SpecializedBlobClientBuilder with {@code pathName} appended to this client's URL,
 * rewritten from the "dfs" to the "blob" endpoint.
 *
 * @param pathName The name of the path to append.
 * @return {@link SpecializedBlobClientBuilder}
 */
SpecializedBlobClientBuilder prepareBuilderAppendPath(String pathName) {
    String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(getPathUrl(), "blob", "dfs");
    return new SpecializedBlobClientBuilder()
        .pipeline(getHttpPipeline())
        .endpoint(StorageImplUtils.appendToUrlPath(blobUrl, pathName).toString());
}
}
/**
 * Asynchronous client for a directory in Azure Data Lake Storage Gen2: create/delete files and
 * sub-directories, rename, and list paths. All operations return reactive types and perform no
 * network call until subscribed.
 */
class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient {
    // Used by the monoError/pagedFluxError helpers below to log mapped errors.
    private final ClientLogger logger = new ClientLogger(DataLakeDirectoryAsyncClient.class);

    /**
     * Package-private constructor for use by {@link DataLakePathClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param fileSystemName The file system name.
     * @param directoryName The directory name.
     * @param blockBlobAsyncClient The underlying {@link BlockBlobAsyncClient} backing this path.
     */
    DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion,
        String accountName, String fileSystemName, String directoryName,
        BlockBlobAsyncClient blockBlobAsyncClient) {
        super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName,
            PathResourceType.DIRECTORY, blockBlobAsyncClient);
    }

    // Wraps an existing path client as a directory client, reusing all of its state.
    DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) {
        super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getPathUrl(),
            dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(),
            dataLakePathAsyncClient.getFileSystemName(), dataLakePathAsyncClient.pathName,
            PathResourceType.DIRECTORY, dataLakePathAsyncClient.getBlockBlobAsyncClient());
    }

    /**
     * Gets the URL of the directory represented by this client on the Data Lake service.
     *
     * @return the URL.
     */
    public String getDirectoryUrl() {
        return getPathUrl();
    }

    /**
     * Gets the path of this directory.
     *
     * @return The path of the directory (as reported by {@code getObjectPath()}).
     */
    public String getDirectoryPath() {
        return getObjectPath();
    }

    /**
     * Gets the name of this directory, not including its full path.
     *
     * @return The name of the directory.
     */
    public String getDirectoryName() {
        return getObjectName();
    }

    /**
     * Deletes this directory (non-recursive; fails if the directory is not empty).
     *
     * @return A reactive response signalling completion.
     */
    public Mono<Void> delete() {
        try {
            return deleteWithResponse(false, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            // Surface synchronous failures through the returned Mono rather than throwing.
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes this directory.
     *
     * @param recursive Whether or not to delete all paths beneath the directory.
     * @param requestConditions {@link DataLakeRequestConditions}
     * @return A reactive response signalling completion.
     */
    public Mono<Response<Void>> deleteWithResponse(boolean recursive,
        DataLakeRequestConditions requestConditions) {
        try {
            return withContext(context -> deleteWithResponse(recursive, requestConditions, context));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    // NOTE(review): a javadoc for getFileAsyncClient(String) exists in the original source but the
    // method body is not visible in this copy — presumably lost in extraction, since
    // createFileWithResponse/deleteFileWithResponse below call it. Confirm against the repository.

    /**
     * Creates a new file within this directory. By default this does NOT overwrite an existing file.
     *
     * @param fileName Name of the file to create.
     * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
     */
    public Mono<DataLakeFileAsyncClient> createFile(String fileName) {
        return createFile(fileName, false);
    }

    /**
     * Creates a new file within this directory.
     *
     * @param fileName Name of the file to create.
     * @param overwrite Whether or not to overwrite, should the file exist.
     * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created.
     */
    public Mono<DataLakeFileAsyncClient> createFile(String fileName, boolean overwrite) {
        DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
        if (!overwrite) {
            // If-None-Match: * makes the service fail the create when the file already exists.
            requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        try {
            return createFileWithResponse(fileName, null, null, null, null, requestConditions)
                .flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Creates a new file within this directory; an existing file with the same name is overwritten
     * unless {@code requestConditions} prevents it.
     *
     * @param fileName Name of the file to create.
     * @param permissions POSIX access permissions for the file owner, the file owning group, and others.
     * @param umask Restricts permissions of the file to be created.
     * @param headers {@link PathHttpHeaders}
     * @param metadata Metadata to associate with the file. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     * @param requestConditions {@link DataLakeRequestConditions}
     * @return A {@link Mono} containing a {@link Response} whose value is a
     * {@link DataLakeFileAsyncClient} used to interact with the file created.
     */
    public Mono<Response<DataLakeFileAsyncClient>> createFileWithResponse(String fileName,
        String permissions, String umask, PathHttpHeaders headers, Map<String, String> metadata,
        DataLakeRequestConditions requestConditions) {
        try {
            DataLakeFileAsyncClient dataLakeFileAsyncClient = getFileAsyncClient(fileName);
            // Delegate to the file client's create, then hand that client back to the caller.
            return dataLakeFileAsyncClient.createWithResponse(permissions, umask, headers, metadata,
                requestConditions)
                .map(response -> new SimpleResponse<>(response, dataLakeFileAsyncClient));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes the named file in this directory. Fails if the file does not exist.
     *
     * @param fileName Name of the file to delete.
     * @return A reactive response signalling completion.
     */
    public Mono<Void> deleteFile(String fileName) {
        try {
            return deleteFileWithResponse(fileName, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes the named file in this directory. Fails if the file does not exist.
     *
     * @param fileName Name of the file to delete.
     * @param requestConditions {@link DataLakeRequestConditions}
     * @return A {@link Mono} containing status code and HTTP headers.
     */
    public Mono<Response<Void>> deleteFileWithResponse(String fileName,
        DataLakeRequestConditions requestConditions) {
        try {
            return getFileAsyncClient(fileName).deleteWithResponse(requestConditions);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Creates a new {@link DataLakeDirectoryAsyncClient} by concatenating {@code subdirectoryName}
     * to the end of this client's URL, reusing the same request policy pipeline.
     *
     * @param subdirectoryName A {@code String} representing the name of the sub-directory.
     * @return A new {@link DataLakeDirectoryAsyncClient} for the named sub-directory.
     */
    public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirectoryName) {
        Objects.requireNonNull(subdirectoryName, "'subdirectoryName' can not be set to null");

        BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subdirectoryName)
            .buildBlockBlobAsyncClient();

        // Decode-then-encode normalizes the segment so already-encoded input is not double-encoded.
        return new DataLakeDirectoryAsyncClient(getHttpPipeline(),
            StorageImplUtils.appendToUrlPath(getPathUrl(),
                Utility.urlEncode(Utility.urlDecode(subdirectoryName))).toString(),
            getServiceVersion(), getAccountName(), getFileSystemName(),
            Utility.urlEncode(getObjectPath() + "/" + Utility.urlDecode(subdirectoryName)),
            blockBlobAsyncClient);
    }

    /**
     * Creates a new sub-directory within this directory. By default this does NOT overwrite an
     * existing sub-directory.
     *
     * @param subdirectoryName Name of the sub-directory to create.
     * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} for the directory created.
     */
    public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName) {
        return createSubdirectory(subdirectoryName, false);
    }

    /**
     * Creates a new sub-directory within this directory.
     *
     * @param subdirectoryName Name of the sub-directory to create.
     * @param overwrite Whether or not to overwrite, should the sub directory exist.
     * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} for the directory created.
     */
    public Mono<DataLakeDirectoryAsyncClient> createSubdirectory(String subdirectoryName,
        boolean overwrite) {
        DataLakeRequestConditions requestConditions = new DataLakeRequestConditions();
        if (!overwrite) {
            // If-None-Match: * makes the service fail the create when the directory already exists.
            requestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        try {
            return createSubdirectoryWithResponse(subdirectoryName, null, null, null, null,
                requestConditions)
                .flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Creates a new sub-directory within this directory; an existing sub-directory with the same
     * name is overwritten unless {@code requestConditions} prevents it.
     *
     * @param subdirectoryName Name of the sub-directory to create.
     * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group,
     * and others.
     * @param umask Restricts permissions of the sub-directory to be created.
     * @param headers {@link PathHttpHeaders}
     * @param metadata Metadata to associate with the resource. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     * @param requestConditions {@link DataLakeRequestConditions}
     * @return A {@link Mono} containing a {@link Response} whose value is a
     * {@link DataLakeDirectoryAsyncClient} used to interact with the sub-directory created.
     */
    public Mono<Response<DataLakeDirectoryAsyncClient>> createSubdirectoryWithResponse(
        String subdirectoryName, String permissions, String umask, PathHttpHeaders headers,
        Map<String, String> metadata, DataLakeRequestConditions requestConditions) {
        try {
            DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient =
                getSubdirectoryAsyncClient(subdirectoryName);
            return dataLakeDirectoryAsyncClient.createWithResponse(permissions, umask, headers,
                metadata, requestConditions).map(response ->
                    new SimpleResponse<>(response, dataLakeDirectoryAsyncClient));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes the named sub-directory (non-recursive). Fails if it does not exist or is not empty.
     *
     * @param subdirectoryName Name of the sub-directory to delete.
     * @return A reactive response signalling completion.
     */
    public Mono<Void> deleteSubdirectory(String subdirectoryName) {
        try {
            return deleteSubdirectoryWithResponse(subdirectoryName, false, null).flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Deletes the named sub-directory. Fails if it does not exist, or is not empty and
     * {@code recursive} is false.
     *
     * @param directoryName Name of the sub-directory to delete.
     * @param recursive Whether or not to delete all paths beneath the sub-directory.
     * @param requestConditions {@link DataLakeRequestConditions}
     * @return A {@link Mono} containing status code and HTTP headers.
     */
    public Mono<Response<Void>> deleteSubdirectoryWithResponse(String directoryName, boolean recursive,
        DataLakeRequestConditions requestConditions) {
        try {
            return getSubdirectoryAsyncClient(directoryName).deleteWithResponse(recursive,
                requestConditions);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Moves the directory to another location within the file system.
     *
     * @param destinationFileSystem The file system of the destination within the account;
     * {@code null} for the current file system.
     * @param destinationPath Relative path from the file system to rename the directory to, excluding the
     * file system name (e.g. to move "mydir/mysubdir" to "newdir", pass "newdir").
     * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} for the new directory.
     */
    public Mono<DataLakeDirectoryAsyncClient> rename(String destinationFileSystem,
        String destinationPath) {
        try {
            return renameWithResponse(destinationFileSystem, destinationPath, null, null)
                .flatMap(FluxUtil::toMono);
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Moves the directory to another location within the file system, with access conditions on
     * both source and destination.
     *
     * @param destinationFileSystem The file system of the destination within the account;
     * {@code null} for the current file system.
     * @param destinationPath Relative path (excluding the file system name) to rename the directory to.
     * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source.
     * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination.
     * @return A {@link Mono} containing a {@link Response} whose value is a
     * {@link DataLakeDirectoryAsyncClient} for the renamed directory.
     */
    public Mono<Response<DataLakeDirectoryAsyncClient>> renameWithResponse(String destinationFileSystem,
        String destinationPath, DataLakeRequestConditions sourceRequestConditions,
        DataLakeRequestConditions destinationRequestConditions) {
        try {
            // The context-aware overload returns a path client; wrap it back into a directory client.
            return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath,
                sourceRequestConditions, destinationRequestConditions, context)).map(
                    response -> new SimpleResponse<>(response,
                        new DataLakeDirectoryAsyncClient(response.getValue())));
        } catch (RuntimeException ex) {
            return monoError(logger, ex);
        }
    }

    /**
     * Lazily emits the immediate children of this directory (non-recursive).
     *
     * @return A reactive response emitting the list of files/directories.
     */
    public PagedFlux<PathItem> listPaths() {
        return this.listPaths(false, false, null);
    }

    /**
     * Lazily emits the files/directories under this directory.
     *
     * @param recursive Specifies if the call should recursively include all paths.
     * @param userPrincipleNameReturned If "true", identity values in the x-ms-owner, x-ms-group and
     * x-ms-acl response headers are transformed from Azure Active Directory Object IDs to User Principal
     * Names; if "false" they are returned as Object IDs. Defaults to false. Group and application Object
     * IDs are not translated because they lack unique friendly names.
     * @param maxResults Maximum number of items per page; the service caps this at 5,000.
     * @return A reactive response emitting the list of files/directories.
     */
    public PagedFlux<PathItem> listPaths(boolean recursive, boolean userPrincipleNameReturned,
        Integer maxResults) {
        try {
            return listPathsWithOptionalTimeout(recursive, userPrincipleNameReturned, maxResults, null);
        } catch (RuntimeException ex) {
            return pagedFluxError(logger, ex);
        }
    }

    // Package-private variant letting the sync client impose a per-call timeout on each page fetch.
    PagedFlux<PathItem> listPathsWithOptionalTimeout(boolean recursive,
        boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) {
        // One function serves as both first-page supplier (marker == null) and next-page fetcher.
        Function<String, Mono<PagedResponse<Path>>> func = marker ->
            listPathsSegment(marker, recursive, userPrincipleNameReturned, maxResults, timeout)
                .map(response -> new PagedResponseBase<>(
                    response.getRequest(),
                    response.getStatusCode(),
                    response.getHeaders(),
                    response.getValue().getPaths(),
                    response.getDeserializedHeaders().getContinuation(),
                    response.getDeserializedHeaders()));

        // Convert generated-layer Path items into the public PathItem model.
        return new PagedFlux<>(() -> func.apply(null), func).mapPage(Transforms::toPathItem);
    }

    // Single page fetch against the generated file-system layer, with an optional timeout.
    private Mono<FileSystemsListPathsResponse> listPathsSegment(String marker, boolean recursive,
        boolean userPrincipleNameReturned, Integer maxResults, Duration timeout) {
        return StorageImplUtils.applyOptionalTimeout(
            this.fileSystemDataLakeStorage.fileSystems().listPathsWithRestResponseAsync(
                recursive, marker, getDirectoryPath(), maxResults, userPrincipleNameReturned, null,
                null, Context.NONE), timeout);
    }

    /**
     * Prepares a SpecializedBlobClientBuilder with {@code pathName} appended to this client's URL,
     * rewritten from the "dfs" to the "blob" endpoint.
     *
     * @param pathName The name of the path to append.
     * @return {@link SpecializedBlobClientBuilder}
     */
    SpecializedBlobClientBuilder prepareBuilderAppendPath(String pathName) {
        String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(getPathUrl(), "blob", "dfs");

        return new SpecializedBlobClientBuilder()
            .pipeline(getHttpPipeline())
            .endpoint(StorageImplUtils.appendToUrlPath(blobUrl, pathName).toString());
    }
}
Does this have a performance implication? Please run a quick perf test to see whether there is any measurable impact.
/**
 * Publishes the aggregate statistics of {@code histogram} (count, min, max, mean and a fixed
 * set of percentiles) into the metric info of {@code payload}.
 *
 * @param payload   report payload whose metric info is populated.
 * @param histogram live histogram to summarize; it is copied first so recording can continue.
 */
private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) {
    // Read from a snapshot so concurrent writers keep recording while we summarize.
    DoubleHistogram snapshot = histogram.copy();
    payload.getMetricInfo().setCount(snapshot.getTotalCount());
    payload.getMetricInfo().setMax(snapshot.getMaxValue());
    payload.getMetricInfo().setMin(snapshot.getMinValue());
    payload.getMetricInfo().setMean(snapshot.getMean());

    // Same percentile set as before, built with a loop instead of five explicit puts.
    Map<Double, Double> percentiles = new HashMap<>();
    for (double p : new double[] {PERCENTILE_50, PERCENTILE_90, PERCENTILE_95, PERCENTILE_99,
        PERCENTILE_999}) {
        percentiles.put(p, snapshot.getValueAtPercentile(p));
    }
    payload.getMetricInfo().setPercentiles(percentiles);
}
DoubleHistogram copyHistogram = histogram.copy();
/**
 * Copies the given histogram and writes its aggregate statistics — total count, max, min, mean
 * and the standard percentile set (p50/p90/p95/p99/p99.9) — into the payload's metric info.
 *
 * @param payload   report payload whose metric info is populated.
 * @param histogram live histogram to summarize.
 */
private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) {
    // Copy first so concurrent writers can keep recording while we read a stable snapshot.
    DoubleHistogram copyHistogram = histogram.copy();
    payload.getMetricInfo().setCount(copyHistogram.getTotalCount());
    payload.getMetricInfo().setMax(copyHistogram.getMaxValue());
    payload.getMetricInfo().setMin(copyHistogram.getMinValue());
    payload.getMetricInfo().setMean(copyHistogram.getMean());

    // Percentile distribution keyed by the percentile itself (50.0 -> value at p50, etc.).
    Map<Double, Double> percentile = new HashMap<>();
    percentile.put(PERCENTILE_50, copyHistogram.getValueAtPercentile(PERCENTILE_50));
    percentile.put(PERCENTILE_90, copyHistogram.getValueAtPercentile(PERCENTILE_90));
    percentile.put(PERCENTILE_95, copyHistogram.getValueAtPercentile(PERCENTILE_95));
    percentile.put(PERCENTILE_99, copyHistogram.getValueAtPercentile(PERCENTILE_99));
    percentile.put(PERCENTILE_999, copyHistogram.getValueAtPercentile(PERCENTILE_999));
    payload.getMetricInfo().setPercentiles(percentile);
}
/**
 * Collects client-side telemetry (request latency/charge, CPU, memory remaining) into histograms
 * and periodically serializes the aggregated snapshot. The reporting loop re-arms itself every
 * {@code clientTelemetrySchedulingSec} seconds until {@link #close()} is called.
 * NOTE(review): the loop currently only logs the serialized payload; presumably an HTTP send to a
 * telemetry endpoint is intended — confirm.
 */
class ClientTelemetry {
    // Histogram configuration: value ceilings, significant-digit precision and metric labels.
    public final static int ONE_KB_TO_BYTES = 1024;
    public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000;
    public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4;
    public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2;
    public final static String REQUEST_LATENCY_NAME = "RequestLatency";
    public final static String REQUEST_LATENCY_UNIT = "MicroSec";
    public final static int REQUEST_CHARGE_MAX = 10000;
    public final static int REQUEST_CHARGE_PRECISION = 2;
    public final static String REQUEST_CHARGE_NAME = "RequestCharge";
    public final static String REQUEST_CHARGE_UNIT = "RU";
    public final static int CPU_MAX = 100;
    public final static int CPU_PRECISION = 2;
    private final static String CPU_NAME = "CPU";
    private final static String CPU_UNIT = "Percentage";
    public final static int MEMORY_MAX_IN_MB = 102400;
    public final static int MEMORY_PRECISION = 2;
    private final static String MEMORY_NAME = "MemoryRemaining";
    private final static String MEMORY_UNIT = "MB";
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
    private ClientTelemetryInfo clientTelemetryInfo;
    private HttpClient httpClient;
    // Single daemon thread drives the telemetry loop so it never blocks JVM shutdown.
    private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
        new DaemonThreadFactory());
    private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService);
    private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class);
    private volatile boolean isClosed;
    private volatile boolean isClientTelemetryEnabled;
    // NOTE(review): the URL literal below is unterminated in this copy of the source — almost
    // certainly truncated during extraction; restore the full instance-metadata URL.
    private static String AZURE_VM_METADATA = "http:
    // Percentiles reported for every histogram snapshot.
    private static final double PERCENTILE_50 = 50.0;
    private static final double PERCENTILE_90 = 90.0;
    private static final double PERCENTILE_95 = 95.0;
    private static final double PERCENTILE_99 = 99.0;
    private static final double PERCENTILE_999 = 99.9;
    private final int clientTelemetrySchedulingSec;

    public ClientTelemetry(Boolean acceleratedNetworking,
                           String clientId,
                           String processId,
                           String userAgent,
                           ConnectionMode connectionMode,
                           String globalDatabaseAccountName,
                           String applicationRegion,
                           String hostEnvInfo,
                           HttpClient httpClient,
                           boolean isClientTelemetryEnabled) {
        clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode,
            globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking);
        this.isClosed = false;
        this.httpClient = httpClient;
        this.isClientTelemetryEnabled = isClientTelemetryEnabled;
        // Reporting period is read once at construction time.
        this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec();
    }

    public ClientTelemetryInfo getClientTelemetryInfo() {
        return clientTelemetryInfo;
    }

    /** Records {@code value}; failures are logged and swallowed so telemetry never breaks callers. */
    public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) {
        try {
            doubleHistogram.recordValue(value);
        } catch (Exception ex) {
            logger.warn("Error while recording value for client telemetry. ", ex);
        }
    }

    /** Overload of {@link #recordValue(ConcurrentDoubleHistogram, long)} for {@code double} values. */
    public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) {
        try {
            doubleHistogram.recordValue(value);
        } catch (Exception ex) {
            logger.warn("Error while recording value for client telemetry. ", ex);
        }
    }

    /** Starts telemetry: fetch VM metadata once (best effort), then arm the periodic report loop. */
    public void init() {
        loadAzureVmMetaData();
        sendClientTelemetry().subscribe();
    }

    /** Stops the reporting loop and shuts the scheduler down. */
    public void close() {
        this.isClosed = true;
        this.scheduledExecutorService.shutdown();
        // NOTE(review): message says "GlobalEndpointManager" — looks copy-pasted from another
        // class; left unchanged in this documentation-only pass.
        logger.debug("GlobalEndpointManager closed.");
    }

    /**
     * One iteration of the reporting loop: wait the configured delay, snapshot the histograms,
     * log the serialized payload, clear the collected data, then recurse to arm the next
     * iteration. Errors are logged, the window's data is cleared, and the loop continues.
     */
    private Mono<Void> sendClientTelemetry() {
        return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
            .flatMap(t -> {
                if (this.isClosed) {
                    logger.warn("client already closed");
                    return Mono.empty();
                }

                if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                    logger.trace("client telemetry not enabled");
                    return Mono.empty();
                }
                readHistogram();
                try {
                    logger.info("ClientTelemetry {}",
                        OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
                } catch (JsonProcessingException e) {
                    logger.error("Error which parsing client telemetry into json. ", e);
                }
                clearDataForNextRun();
                // Re-arm the loop for the next reporting window.
                return this.sendClientTelemetry();
            }).onErrorResume(ex -> {
                logger.error("sendClientTelemetry() - Unable to send client telemetry"
                    + ". Exception: ", ex);
                clearDataForNextRun();
                return this.sendClientTelemetry();
            }).subscribeOn(scheduler);
    }

    /** Best-effort, fire-and-forget fetch of Azure VM metadata to enrich the telemetry payload. */
    private void loadAzureVmMetaData() {
        URI targetEndpoint = null;
        try {
            targetEndpoint = new URI(AZURE_VM_METADATA);
        } catch (URISyntaxException ex) {
            logger.info("Unable to parse azure vm metadata url");
            return;
        }
        HashMap<String, String> headers = new HashMap<>();
        // The instance metadata service requires this header on every request.
        headers.put("Metadata", "true");
        HttpHeaders httpHeaders = new HttpHeaders(headers);
        HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint,
            targetEndpoint.getPort(), httpHeaders);
        Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest);
        httpResponseMono.flatMap(response -> response.bodyAsString())
            .map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class))
            .doOnSuccess(azureVMMetadata -> {
                this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation());
                this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|"
                    + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|"
                    + azureVMMetadata.getAzEnvironment());
            }).onErrorResume(throwable -> {
                // Not running on an Azure VM (or IMDS unreachable) is a normal, non-fatal case.
                logger.info("Unable to get azure vm metadata");
                return Mono.empty();
            }).subscribe();
    }

    /** Clears per-window data: operation/cache maps are emptied, system histograms are reset. */
    private void clearDataForNextRun() {
        this.clientTelemetryInfo.getOperationInfoMap().clear();
        this.clientTelemetryInfo.getCacheRefreshInfoMap().clear();
        for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) {
            histogram.reset();
        }
    }

    /**
     * Builds fresh CPU/memory histograms from the latest CpuMemoryMonitor samples, stamps the
     * payload with the current time, and folds every tracked histogram's statistics into its
     * report payload via fillMetricsInfo.
     */
    private void readHistogram() {
        ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX,
            ClientTelemetry.CPU_PRECISION);
        cpuHistogram.setAutoResize(true);
        for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) {
            recordValue(cpuHistogram, val);
        }
        ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT);
        clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram);

        ConcurrentDoubleHistogram memoryHistogram =
            new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB,
                ClientTelemetry.MEMORY_PRECISION);
        memoryHistogram.setAutoResize(true);
        for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) {
            recordValue(memoryHistogram, val);
        }
        ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT);
        clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram);

        this.clientTelemetryInfo.setTimeStamp(Instant.now().toString());
        for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry :
            this.clientTelemetryInfo.getSystemInfoMap().entrySet()) {
            fillMetricsInfo(entry.getKey(), entry.getValue());
        }
        for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry :
            this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) {
            fillMetricsInfo(entry.getKey(), entry.getValue());
        }
        for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry :
            this.clientTelemetryInfo.getOperationInfoMap().entrySet()) {
            fillMetricsInfo(entry.getKey(), entry.getValue());
        }
    }

    /** Thread factory producing daemon threads so the telemetry executor never blocks JVM exit. */
    private static class DaemonThreadFactory implements ThreadFactory {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setDaemon(true);
            return t;
        }
    }
}
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. 
", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
No perf implication — this only changes the declared reference type from DoubleHistogram to ConcurrentDoubleHistogram; the object was already a ConcurrentDoubleHistogram in the code.
/**
 * Populates the payload's metric info with summary statistics (count, max, min, mean and
 * a fixed set of percentiles) taken from a point-in-time snapshot of the histogram.
 */
private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) {
    // Snapshot first so concurrent recordings cannot skew the reported numbers.
    DoubleHistogram snapshot = histogram.copy();
    payload.getMetricInfo().setCount(snapshot.getTotalCount());
    payload.getMetricInfo().setMax(snapshot.getMaxValue());
    payload.getMetricInfo().setMin(snapshot.getMinValue());
    payload.getMetricInfo().setMean(snapshot.getMean());
    Map<Double, Double> percentiles = new HashMap<>();
    for (double p : new double[]{PERCENTILE_50, PERCENTILE_90, PERCENTILE_95,
                                 PERCENTILE_99, PERCENTILE_999}) {
        percentiles.put(p, snapshot.getValueAtPercentile(p));
    }
    payload.getMetricInfo().setPercentiles(percentiles);
}
DoubleHistogram copyHistogram = histogram.copy();
/**
 * Populates the payload's metric info with summary statistics (count, max, min, mean and
 * a fixed set of percentiles) taken from a point-in-time snapshot of the histogram.
 */
private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) {
    // Snapshot first so concurrent recordings cannot skew the reported numbers.
    DoubleHistogram snapshot = histogram.copy();
    payload.getMetricInfo().setCount(snapshot.getTotalCount());
    payload.getMetricInfo().setMax(snapshot.getMaxValue());
    payload.getMetricInfo().setMin(snapshot.getMinValue());
    payload.getMetricInfo().setMean(snapshot.getMean());
    Map<Double, Double> percentiles = new HashMap<>();
    for (double p : new double[]{PERCENTILE_50, PERCENTILE_90, PERCENTILE_95,
                                 PERCENTILE_99, PERCENTILE_999}) {
        percentiles.put(p, snapshot.getValueAtPercentile(p));
    }
    payload.getMetricInfo().setPercentiles(percentiles);
}
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. 
", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. 
", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
Updating the comment, as my reply above was for a different comment. New reply: "No perf impact — this is a separate 10-minute scheduled thread and won't interfere with the current workload."
/**
 * Populates the payload's metric info with summary statistics (count, max, min, mean and
 * a fixed set of percentiles) taken from a point-in-time snapshot of the histogram.
 */
private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) {
    // Snapshot first so concurrent recordings cannot skew the reported numbers.
    DoubleHistogram snapshot = histogram.copy();
    payload.getMetricInfo().setCount(snapshot.getTotalCount());
    payload.getMetricInfo().setMax(snapshot.getMaxValue());
    payload.getMetricInfo().setMin(snapshot.getMinValue());
    payload.getMetricInfo().setMean(snapshot.getMean());
    Map<Double, Double> percentiles = new HashMap<>();
    for (double p : new double[]{PERCENTILE_50, PERCENTILE_90, PERCENTILE_95,
                                 PERCENTILE_99, PERCENTILE_999}) {
        percentiles.put(p, snapshot.getValueAtPercentile(p));
    }
    payload.getMetricInfo().setPercentiles(percentiles);
}
DoubleHistogram copyHistogram = histogram.copy();
/**
 * Populates the payload's metric info with summary statistics (count, max, min, mean and
 * a fixed set of percentiles) taken from a point-in-time snapshot of the histogram.
 */
private void fillMetricsInfo(ReportPayload payload, ConcurrentDoubleHistogram histogram) {
    // Snapshot first so concurrent recordings cannot skew the reported numbers.
    DoubleHistogram snapshot = histogram.copy();
    payload.getMetricInfo().setCount(snapshot.getTotalCount());
    payload.getMetricInfo().setMax(snapshot.getMaxValue());
    payload.getMetricInfo().setMin(snapshot.getMinValue());
    payload.getMetricInfo().setMean(snapshot.getMean());
    Map<Double, Double> percentiles = new HashMap<>();
    for (double p : new double[]{PERCENTILE_50, PERCENTILE_90, PERCENTILE_95,
                                 PERCENTILE_99, PERCENTILE_999}) {
        percentiles.put(p, snapshot.getValueAtPercentile(p));
    }
    payload.getMetricInfo().setPercentiles(percentiles);
}
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. 
", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(ClientTelemetry.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(ConcurrentDoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. 
", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (ConcurrentDoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
Update javadoc: "Deletes a subscription matching the {@code subscriptionName} in topic {@code topicName}."
/**
 * Deletes a subscription matching the {@code subscriptionName} in topic {@code topicName}.
 *
 * @param topicName Name of topic associated with subscription to delete.
 * @param subscriptionName Name of subscription to delete.
 */
public void deleteSubscription(String topicName, String subscriptionName) {
    // Synchronous facade: delegate to the async client and block until the delete completes.
    asyncClient.deleteSubscription(topicName, subscriptionName).block();
}
asyncClient.deleteSubscription(topicName, subscriptionName).block();
/**
 * Deletes a subscription matching the {@code subscriptionName} in topic {@code topicName}.
 *
 * @param topicName Name of topic associated with subscription to delete.
 * @param subscriptionName Name of subscription to delete.
 */
public void deleteSubscription(String topicName, String subscriptionName) {
    final Mono<Void> deleteOperation = asyncClient.deleteSubscription(topicName, subscriptionName);
    deleteOperation.block();
}
class ServiceBusAdministrationClient { private final ServiceBusAdministrationAsyncClient asyncClient; /** * Creates a new instance with the given client. * * @param asyncClient Asynchronous client to perform management calls through. */ ServiceBusAdministrationClient(ServiceBusAdministrationAsyncClient asyncClient) { this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null."); } /** * Creates a queue with the given name. * * @param queueName Name of the queue to create. * * @return The created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queueName} is null. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws ResourceExistsException if a queue exists with the same {@code queueName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties createQueue(String queueName) { return asyncClient.createQueue(queueName).block(); } /** * Creates a queue with the {@link CreateQueueOptions}. * * @param queueName Name of the queue to create. * @param queueOptions Information about the queue to create. * * @return The created queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties createQueue(String queueName, CreateQueueOptions queueOptions) { return asyncClient.createQueue(queueName, queueOptions).block(); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Information about the queue to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueProperties> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions, Context context) { return asyncClient.createQueueWithResponse(queueName, queueOptions, context != null ? context : Context.NONE) .block(); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return Information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. 
* @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties createRule(String topicName, String subscriptionName, String ruleName) { return asyncClient.createRule(topicName, subscriptionName, ruleName).block(); } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return Information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties createRule(String topicName, String ruleName, String subscriptionName, CreateRuleOptions ruleOptions) { return asyncClient.createRule(topicName, subscriptionName, ruleName, ruleOptions).block(); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. 
* @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<RuleProperties> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { return asyncClient.createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context).block(); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return Information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties createSubscription(String topicName, String subscriptionName) { return asyncClient.createSubscription(topicName, subscriptionName).block(); } /** * Creates a subscription with the {@link SubscriptionProperties}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return Information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return asyncClient.createSubscription(topicName, subscriptionName, subscriptionOptions).block(); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created subscription in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws NullPointerException if {@code subscription} is null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionProperties> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { return asyncClient.createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context != null ? context : Context.NONE).block(); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return Information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties createTopic(String topicName) { return asyncClient.createTopic(topicName).block(); } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions Information about the topic to create. * * @return Information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the topicOptions quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties createTopic(String topicName, CreateTopicOptions topicOptions) { return asyncClient.createTopic(topicName, topicOptions).block(); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions Information about the topic to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicProperties> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { return asyncClient.createTopicWithResponse(topicName, topicOptions, context != null ? context : Context.NONE) .block(); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. 
* * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteQueue(String queueName) { asyncClient.deleteQueue(queueName).block(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response when the queue is successfully deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteQueueWithResponse(String queueName, Context context) { return asyncClient.deleteQueueWithResponse(queueName, context != null ? context : Context.NONE).block(); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteRule(String topicName, String subscriptionName, String ruleName) { asyncClient.deleteRule(topicName, subscriptionName, ruleName).block(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { return asyncClient.deleteRuleWithResponse(topicName, subscriptionName, ruleName, context != null ? context : Context.NONE).block(); } /** * Deletes a subscription the matching {@code subscriptionName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { return asyncClient.deleteSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE).block(); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. 
* @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteTopic(String topicName) { asyncClient.deleteTopic(topicName).block(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteTopicWithResponse(String topicName, Context context) { return asyncClient.deleteTopicWithResponse(topicName, context != null ? context : Context.NONE).block(); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return Information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties getQueue(String queueName) { return asyncClient.getQueue(queueName).block(); } /** * Gets information about the queue along with its HTTP response. 
* * @param queueName Name of queue to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueProperties> getQueueWithResponse(String queueName, Context context) { return asyncClient.getQueueWithResponse(queueName, context != null ? context : Context.NONE, Function.identity()).block(); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return {@code true} if the queue exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public boolean getQueueExists(String queueName) { final Boolean exists = asyncClient.getQueueExists(queueName).block(); return exists != null && exists; } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response and {@code true} if the queue exists; otherwise {@code false}. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> getQueueExistsWithResponse(String queueName, Context context) { final Mono<Response<QueueProperties>> queueWithResponse = asyncClient.getQueueWithResponse(queueName, context != null ? context : Context.NONE, Function.identity()); return asyncClient.getEntityExistsWithResponse(queueWithResponse).block(); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return Runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueRuntimeProperties getQueueRuntimeProperties(String queueName) { return asyncClient.getQueueRuntimeProperties(queueName).block(); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueRuntimeProperties> getQueueRuntimePropertiesWithResponse(String queueName, Context context) { return asyncClient.getQueueWithResponse(queueName, context != null ? context : Context.NONE, QueueRuntimeProperties::new).block(); } /** * Gets information about the Service Bus namespace. * * @return Information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public NamespaceProperties getNamespaceProperties() { return asyncClient.getNamespaceProperties().block(); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<NamespaceProperties> getNamespacePropertiesWithResponse(Context context) { return asyncClient.getNamespacePropertiesWithResponse(context).block(); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. 
* @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties getRule(String topicName, String subscriptionName, String ruleName) { return asyncClient.getRule(topicName, subscriptionName, ruleName).block(); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<RuleProperties> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { return asyncClient.getRuleWithResponse(topicName, subscriptionName, ruleName, context != null ? context : Context.NONE).block(); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return Information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties getSubscription(String topicName, String subscriptionName) { return asyncClient.getSubscription(topicName, subscriptionName).block(); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionProperties> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { return asyncClient.getSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE, Function.identity()).block(); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return {@code true} if the subscription exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. 
* @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public boolean getSubscriptionExists(String topicName, String subscriptionName) { final Boolean exists = asyncClient.getSubscriptionExists(topicName, subscriptionName).block(); return exists != null && exists; } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response and {@code true} if the subscription exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> getSubscriptionExistsWithResponse(String topicName, String subscriptionName, Context context) { final Mono<Response<SubscriptionProperties>> subscriptionWithResponse = asyncClient.getSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE, Function.identity()); return asyncClient.getEntityExistsWithResponse(subscriptionWithResponse).block(); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return Runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionRuntimeProperties getSubscriptionRuntimeProperties(String topicName, String subscriptionName) { return asyncClient.getSubscriptionRuntimeProperties(topicName, subscriptionName).block(); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionRuntimeProperties> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName, Context context) { return asyncClient.getSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE, SubscriptionRuntimeProperties::new).block(); } /** * Gets information about the topic. * * @param topicName Name of topic to get information about. * * @return Information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties getTopic(String topicName) { return asyncClient.getTopic(topicName).block(); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicProperties> getTopicWithResponse(String topicName, Context context) { return asyncClient.getTopicWithResponse(topicName, context != null ? context : Context.NONE, Function.identity()).block(); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return {@code true} if the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public boolean getTopicExists(String topicName) { final Boolean exists = asyncClient.getTopicExists(topicName).block(); return exists != null && exists; } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response and {@code true} if the topic exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> getTopicExistsWithResponse(String topicName, Context context) { final Mono<Response<TopicProperties>> topicWithResponse = asyncClient.getTopicWithResponse(topicName, context != null ? context : Context.NONE, Function.identity()); return asyncClient.getEntityExistsWithResponse(topicWithResponse).block(); } /** * Gets runtime properties about the topic. * * @param topicName Name of topic to get information about. * * @return Runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicRuntimeProperties getTopicRuntimeProperties(String topicName) { return asyncClient.getTopicRuntimeProperties(topicName).block(); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicRuntimeProperties> getTopicRuntimePropertiesWithResponse(String topicName, Context context) { return asyncClient.getTopicWithResponse(topicName, context != null ? context : Context.NONE, TopicRuntimeProperties::new).block(); } /** * Fetches all the queues in the Service Bus namespace. * * @return A PagedIterable of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * Authorization Rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<QueueProperties> listQueues() { return new PagedIterable<>(asyncClient.listQueues()); } /** * Fetches all the queues in the Service Bus namespace. * * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A PagedIterable of {@link QueueProperties queues} in the Service Bus namespace. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * Authorization Rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<QueueProperties> listQueues(Context context) { final PagedFlux<QueueProperties> pagedFlux = new PagedFlux<>( () -> asyncClient.listQueuesFirstPage(context), continuationToken -> asyncClient.listQueuesNextPage(continuationToken, context != null ? context : Context.NONE)); return new PagedIterable<>(pagedFlux); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return An iterable of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<RuleProperties> listRules(String topicName, String subscriptionName) { return new PagedIterable<>(asyncClient.listRules(topicName, subscriptionName)); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A paged iterable of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<SubscriptionProperties> listSubscriptions(String topicName) { return new PagedIterable<>(asyncClient.listSubscriptions(topicName)); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A paged iterable of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<SubscriptionProperties> listSubscriptions(String topicName, Context context) { final PagedFlux<SubscriptionProperties> pagedFlux = new PagedFlux<>( () -> asyncClient.listSubscriptionsFirstPage(topicName, context != null ? context : Context.NONE), continuationToken -> asyncClient.listSubscriptionsNextPage(topicName, continuationToken, context != null ? context : Context.NONE)); return new PagedIterable<>(pagedFlux); } /** * Fetches all the topics in the Service Bus namespace. * * @return A paged iterable of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<TopicProperties> listTopics() { return new PagedIterable<>(asyncClient.listTopics()); } /** * Fetches all the topics in the Service Bus namespace. 
* * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A paged iterable of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<TopicProperties> listTopics(Context context) { final PagedFlux<TopicProperties> pagedFlux = new PagedFlux<>( () -> asyncClient.listTopicsFirstPage(context), continuationToken -> asyncClient.listTopicsNextPage(continuationToken, context != null ? context : Context.NONE)); return new PagedIterable<>(pagedFlux); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return The updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. 
* @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties updateQueue(QueueProperties queue) { return asyncClient.updateQueue(queue).block(); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The updated queue with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueProperties> updateQueueWithResponse(QueueProperties queue, Context context) { return asyncClient.updateQueueWithResponse(queue, context != null ? context : Context.NONE).block(); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. 
If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return The updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties updateRule(String topicName, String subscriptionName, RuleProperties rule) { return asyncClient.updateRule(topicName, subscriptionName, rule).block(); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. 
* @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<RuleProperties> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { return asyncClient.updateRuleWithResponse(topicName, subscriptionName, rule, context != null ? context : Context.NONE).block(); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return Updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties updateSubscription(SubscriptionProperties subscription) { return asyncClient.updateSubscription(subscription).block(); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionProperties> updateSubscriptionWithResponse( SubscriptionProperties subscription, Context context) { return asyncClient.updateSubscriptionWithResponse(subscription, context != null ? context : Context.NONE) .block(); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return The updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties updateTopic(TopicProperties topic) { return asyncClient.updateTopic(topic).block(); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. 
* * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The updated topic with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicProperties> updateTopicWithResponse(TopicProperties topic, Context context) { return asyncClient.updateTopicWithResponse(topic, context != null ? context : Context.NONE).block(); } }
class ServiceBusAdministrationClient {
    // Synchronous facade: every operation delegates to the async client and blocks for its result.
    private final ServiceBusAdministrationAsyncClient asyncClient;

    /**
     * Creates a new instance with the given client.
     *
     * @param asyncClient Asynchronous client to perform management calls through.
     * @throws NullPointerException if {@code asyncClient} is null.
     */
    ServiceBusAdministrationClient(ServiceBusAdministrationAsyncClient asyncClient) {
        this.asyncClient = Objects.requireNonNull(asyncClient, "'asyncClient' cannot be null.");
    }

    /**
     * Creates a queue with the given name.
     *
     * @param queueName Name of the queue to create.
     *
     * @return The created queue.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     *     namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     *     occurred processing the request.
     * @throws NullPointerException if {@code queueName} is null.
     * @throws IllegalArgumentException if {@code queueName} is an empty string.
     * @throws ResourceExistsException if a queue exists with the same {@code queueName}.
     * @see <a href="https://docs.microsoft.com/rest/api/servicebus/">Service Bus REST API</a>
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public QueueProperties createQueue(String queueName) {
        return asyncClient.createQueue(queueName).block();
    }

    /**
     * Creates a queue with the {@link CreateQueueOptions}.
     *
     * @param queueName Name of the queue to create.
     * @param queueOptions Information about the queue to create.
     *
     * @return The created queue.
     * @throws ClientAuthenticationException if the client's credentials do not have access to modify the
     *     namespace.
     * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error
     *     occurred processing the request.
     * @throws NullPointerException if {@code queueName} or {@code queueOptions} is null.
* @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties createQueue(String queueName, CreateQueueOptions queueOptions) { return asyncClient.createQueue(queueName, queueOptions).block(); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param queueName Name of the queue to create. * @param queueOptions Information about the queue to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created queue in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @throws ResourceExistsException if a queue exists with the same {@link QueueProperties * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueProperties> createQueueWithResponse(String queueName, CreateQueueOptions queueOptions, Context context) { return asyncClient.createQueueWithResponse(queueName, queueOptions, context != null ? context : Context.NONE) .block(); } /** * Creates a rule under the given topic and subscription * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * * @return Information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. 
* @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName} or {@code ruleName} are are null. * @throws ResourceExistsException if a rule exists with the same topic, subscription, and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties createRule(String topicName, String subscriptionName, String ruleName) { return asyncClient.createRule(topicName, subscriptionName, ruleName).block(); } /** * Creates a rule with the {@link CreateRuleOptions}. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. * * @return Information about the created rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties createRule(String topicName, String ruleName, String subscriptionName, CreateRuleOptions ruleOptions) { return asyncClient.createRule(topicName, subscriptionName, ruleName, ruleOptions).block(); } /** * Creates a rule and returns the created rule in addition to the HTTP response. * * @param topicName Name of the topic associated with rule. * @param subscriptionName Name of the subscription associated with the rule. * @param ruleName Name of the rule. * @param ruleOptions Information about the rule to create. 
* @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code ruleName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code ruleName}, or {@code ruleOptions} are are null. * @throws ResourceExistsException if a rule exists with the same topic and rule name. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<RuleProperties> createRuleWithResponse(String topicName, String subscriptionName, String ruleName, CreateRuleOptions ruleOptions, Context context) { return asyncClient.createRuleWithResponse(topicName, subscriptionName, ruleName, ruleOptions, context).block(); } /** * Creates a subscription with the given topic and subscription names. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return Information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties createSubscription(String topicName, String subscriptionName) { return asyncClient.createSubscription(topicName, subscriptionName).block(); } /** * Creates a subscription with the {@link SubscriptionProperties}. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * * @return Information about the created subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are are empty strings. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code subscriptionOptions} * are are null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties createSubscription(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions) { return asyncClient.createSubscription(topicName, subscriptionName, subscriptionOptions).block(); } /** * Creates a queue and returns the created queue in addition to the HTTP response. * * @param topicName Name of the topic associated with subscription. * @param subscriptionName Name of the subscription. * @param subscriptionOptions Information about the subscription to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created subscription in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the quota is exceeded, or an error occurred * processing the request. * @throws NullPointerException if {@code subscription} is null. * @throws ResourceExistsException if a subscription exists with the same topic and subscription name. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionProperties> createSubscriptionWithResponse(String topicName, String subscriptionName, CreateSubscriptionOptions subscriptionOptions, Context context) { return asyncClient.createSubscriptionWithResponse(topicName, subscriptionName, subscriptionOptions, context != null ? context : Context.NONE).block(); } /** * Creates a topic with the given name. * * @param topicName Name of the topic to create. * * @return Information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties createTopic(String topicName) { return asyncClient.createTopic(topicName).block(); } /** * Creates a topic with the {@link CreateTopicOptions}. * * @param topicName Name of the topic to create. * @param topicOptions Information about the topic to create. * * @return Information about the created topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the topicOptions quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties createTopic(String topicName, CreateTopicOptions topicOptions) { return asyncClient.createTopic(topicName, topicOptions).block(); } /** * Creates a topic and returns the created topic in addition to the HTTP response. * * @param topicName Name of the topic to create. * @param topicOptions Information about the topic to create. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The created topic in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code topicOptions} is null. * @throws ResourceExistsException if a topic exists with the same {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicProperties> createTopicWithResponse(String topicName, CreateTopicOptions topicOptions, Context context) { return asyncClient.createTopicWithResponse(topicName, topicOptions, context != null ? context : Context.NONE) .block(); } /** * Deletes a queue the matching {@code queueName}. * * @param queueName Name of queue to delete. 
* * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteQueue(String queueName) { asyncClient.deleteQueue(queueName).block(); } /** * Deletes a queue the matching {@code queueName} and returns the HTTP response. * * @param queueName Name of queue to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response when the queue is successfully deleted. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteQueueWithResponse(String queueName, Context context) { return asyncClient.deleteQueueWithResponse(queueName, context != null ? context : Context.NONE).block(); } /** * Deletes a rule the matching {@code ruleName}. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code topicName} or {@code ruleName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteRule(String topicName, String subscriptionName, String ruleName) { asyncClient.deleteRule(topicName, subscriptionName, ruleName).block(); } /** * Deletes a rule the matching {@code ruleName} and returns the HTTP response. * * @param topicName Name of topic associated with rule to delete. * @param subscriptionName Name of the subscription associated with the rule to delete. * @param ruleName Name of rule to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is an * empty string. * @throws NullPointerException if {@code topicName}, {@code subscriptionName}, or {@code ruleName} is null. * @throws ResourceNotFoundException if the {@code ruleName} does not exist. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { return asyncClient.deleteRuleWithResponse(topicName, subscriptionName, ruleName, context != null ? context : Context.NONE).block(); } /** * Deletes a subscription matching the {@code subscriptionName} in topic {@code topicName}. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. 
* * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Deletes a subscription the matching {@code subscriptionName} and returns the HTTP response. * * @param topicName Name of topic associated with subscription to delete. * @param subscriptionName Name of subscription to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { return asyncClient.deleteSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE).block(); } /** * Deletes a topic the matching {@code topicName}. * * @param topicName Name of topic to delete. * * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public void deleteTopic(String topicName) { asyncClient.deleteTopic(topicName).block(); } /** * Deletes a topic the matching {@code topicName} and returns the HTTP response. * * @param topicName Name of topic to delete. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> deleteTopicWithResponse(String topicName, Context context) { return asyncClient.deleteTopicWithResponse(topicName, context != null ? context : Context.NONE).block(); } /** * Gets information about the queue. * * @param queueName Name of queue to get information about. * * @return Information about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. 
* @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties getQueue(String queueName) { return asyncClient.getQueue(queueName).block(); } /** * Gets information about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueProperties> getQueueWithResponse(String queueName, Context context) { return asyncClient.getQueueWithResponse(queueName, context != null ? context : Context.NONE, Function.identity()).block(); } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. * * @param queueName Name of the queue. * * @return {@code true} if the queue exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public boolean getQueueExists(String queueName) { final Boolean exists = asyncClient.getQueueExists(queueName).block(); return exists != null && exists; } /** * Gets whether or not a queue with {@code queueName} exists in the Service Bus namespace. 
* * @param queueName Name of the queue. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response and {@code true} if the queue exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> getQueueExistsWithResponse(String queueName, Context context) { final Mono<Response<QueueProperties>> queueWithResponse = asyncClient.getQueueWithResponse(queueName, context != null ? context : Context.NONE, Function.identity()); return asyncClient.getEntityExistsWithResponse(queueWithResponse).block(); } /** * Gets runtime properties about the queue. * * @param queueName Name of queue to get information about. * * @return Runtime properties about the queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueRuntimeProperties getQueueRuntimeProperties(String queueName) { return asyncClient.getQueueRuntimeProperties(queueName).block(); } /** * Gets runtime properties about the queue along with its HTTP response. * * @param queueName Name of queue to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. 
* * @return Runtime properties about the queue and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code queueName} is an empty string. * @throws NullPointerException if {@code queueName} is null. * @throws ResourceNotFoundException if the {@code queueName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueRuntimeProperties> getQueueRuntimePropertiesWithResponse(String queueName, Context context) { return asyncClient.getQueueWithResponse(queueName, context != null ? context : Context.NONE, QueueRuntimeProperties::new).block(); } /** * Gets information about the Service Bus namespace. * * @return Information about the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to the namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public NamespaceProperties getNamespaceProperties() { return asyncClient.getNamespaceProperties().block(); } /** * Gets information about the Service Bus namespace along with its HTTP response. * * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the namespace and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<NamespaceProperties> getNamespacePropertiesWithResponse(Context context) { return asyncClient.getNamespacePropertiesWithResponse(context).block(); } /** * Gets a rule from the service namespace. 
* * Only following data types are deserialized in Filters and Action parameters - string, int, long, boolean, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * * @return The associated rule. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties getRule(String topicName, String subscriptionName, String ruleName) { return asyncClient.getRule(topicName, subscriptionName, ruleName).block(); } /** * Gets a rule from the service namespace. * * Only following data types are deserialized in Filters and Action parameters - string, int, long, bool, double, * and OffsetDateTime. Other data types would return its string value. * * @param topicName The name of the topic relative to service bus namespace. * @param subscriptionName The subscription name the rule belongs to. * @param ruleName The name of the rule to retrieve. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The associated rule with the corresponding HTTP response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<RuleProperties> getRuleWithResponse(String topicName, String subscriptionName, String ruleName, Context context) { return asyncClient.getRuleWithResponse(topicName, subscriptionName, ruleName, context != null ? context : Context.NONE).block(); } /** * Gets information about the queue. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * * @return Information about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. 
* @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist in the {@code topicName}. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties getSubscription(String topicName, String subscriptionName) { return asyncClient.getSubscription(topicName, subscriptionName).block(); } /** * Gets information about the subscription along with its HTTP response. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the subscription and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} are empty strings. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} are null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionProperties> getSubscriptionWithResponse(String topicName, String subscriptionName, Context context) { return asyncClient.getSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE, Function.identity()).block(); } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * * @return {@code true} if the subscription exists. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public boolean getSubscriptionExists(String topicName, String subscriptionName) { final Boolean exists = asyncClient.getSubscriptionExists(topicName, subscriptionName).block(); return exists != null && exists; } /** * Gets whether or not a subscription within a topic exists. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of the subscription. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response and {@code true} if the subscription exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> getSubscriptionExistsWithResponse(String topicName, String subscriptionName, Context context) { final Mono<Response<SubscriptionProperties>> subscriptionWithResponse = asyncClient.getSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE, Function.identity()); return asyncClient.getEntityExistsWithResponse(subscriptionWithResponse).block(); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. 
* * @return Runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionRuntimeProperties getSubscriptionRuntimeProperties(String topicName, String subscriptionName) { return asyncClient.getSubscriptionRuntimeProperties(topicName, subscriptionName).block(); } /** * Gets runtime properties about the subscription. * * @param topicName Name of topic associated with subscription. * @param subscriptionName Name of subscription to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Runtime properties about the subscription. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code subscriptionName} is an empty string. * @throws NullPointerException if {@code subscriptionName} is null. * @throws ResourceNotFoundException if the {@code subscriptionName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionRuntimeProperties> getSubscriptionRuntimePropertiesWithResponse( String topicName, String subscriptionName, Context context) { return asyncClient.getSubscriptionWithResponse(topicName, subscriptionName, context != null ? context : Context.NONE, SubscriptionRuntimeProperties::new).block(); } /** * Gets information about the topic. 
* * @param topicName Name of topic to get information about. * * @return Information about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties getTopic(String topicName) { return asyncClient.getTopic(topicName).block(); } /** * Gets information about the topic along with its HTTP response. * * @param topicName Name of topic to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Information about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicProperties> getTopicWithResponse(String topicName, Context context) { return asyncClient.getTopicWithResponse(topicName, context != null ? context : Context.NONE, Function.identity()).block(); } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * * @return {@code true} if the topic exists. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public boolean getTopicExists(String topicName) { final Boolean exists = asyncClient.getTopicExists(topicName).block(); return exists != null && exists; } /** * Gets whether or not a topic with {@code topicName} exists in the Service Bus namespace. * * @param topicName Name of the topic. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The HTTP response and {@code true} if the topic exists; otherwise {@code false}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> getTopicExistsWithResponse(String topicName, Context context) { final Mono<Response<TopicProperties>> topicWithResponse = asyncClient.getTopicWithResponse(topicName, context != null ? context : Context.NONE, Function.identity()); return asyncClient.getEntityExistsWithResponse(topicWithResponse).block(); } /** * Gets runtime properties about the topic. * * @param topicName Name of topic to get information about. * * @return Runtime properties about the topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. 
* @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicRuntimeProperties getTopicRuntimeProperties(String topicName) { return asyncClient.getTopicRuntimeProperties(topicName).block(); } /** * Gets runtime properties about the topic with its HTTP response. * * @param topicName Name of topic to get information about. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Runtime properties about the topic and the associated HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If error occurred processing the request. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @throws NullPointerException if {@code topicName} is null. * @throws ResourceNotFoundException if the {@code topicName} does not exist. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicRuntimeProperties> getTopicRuntimePropertiesWithResponse(String topicName, Context context) { return asyncClient.getTopicWithResponse(topicName, context != null ? context : Context.NONE, TopicRuntimeProperties::new).block(); } /** * Fetches all the queues in the Service Bus namespace. * * @return A PagedIterable of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * Authorization Rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<QueueProperties> listQueues() { return new PagedIterable<>(asyncClient.listQueues()); } /** * Fetches all the queues in the Service Bus namespace. * * @param context Additional context that is passed through the HTTP pipeline during the service call. 
* * @return A PagedIterable of {@link QueueProperties queues} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * Authorization Rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<QueueProperties> listQueues(Context context) { final PagedFlux<QueueProperties> pagedFlux = new PagedFlux<>( () -> asyncClient.listQueuesFirstPage(context), continuationToken -> asyncClient.listQueuesNextPage(continuationToken, context != null ? context : Context.NONE)); return new PagedIterable<>(pagedFlux); } /** * Fetches all the rules for a topic and subscription. * * @param topicName The topic name under which all the rules need to be retrieved. * @param subscriptionName The name of the subscription for which all rules need to be retrieved. * * @return An iterable of {@link RuleProperties rules} for the {@code topicName} and {@code subscriptionName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} or {@code subscriptionName} is null. * @throws IllegalArgumentException if {@code topicName} or {@code subscriptionName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<RuleProperties> listRules(String topicName, String subscriptionName) { return new PagedIterable<>(asyncClient.listRules(topicName, subscriptionName)); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * * @return A paged iterable of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. 
* @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<SubscriptionProperties> listSubscriptions(String topicName) { return new PagedIterable<>(asyncClient.listSubscriptions(topicName)); } /** * Fetches all the subscriptions for a topic. * * @param topicName The topic name under which all the subscriptions need to be retrieved. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A paged iterable of {@link SubscriptionProperties subscriptions} for the {@code topicName}. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws NullPointerException if {@code topicName} is null. * @throws IllegalArgumentException if {@code topicName} is an empty string. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<SubscriptionProperties> listSubscriptions(String topicName, Context context) { final PagedFlux<SubscriptionProperties> pagedFlux = new PagedFlux<>( () -> asyncClient.listSubscriptionsFirstPage(topicName, context != null ? context : Context.NONE), continuationToken -> asyncClient.listSubscriptionsNextPage(topicName, continuationToken, context != null ? context : Context.NONE)); return new PagedIterable<>(pagedFlux); } /** * Fetches all the topics in the Service Bus namespace. * * @return A paged iterable of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<TopicProperties> listTopics() { return new PagedIterable<>(asyncClient.listTopics()); } /** * Fetches all the topics in the Service Bus namespace. * * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A paged iterable of {@link TopicProperties topics} in the Service Bus namespace. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @see <a href="https: * authorization rules</a> */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<TopicProperties> listTopics(Context context) { final PagedFlux<TopicProperties> pagedFlux = new PagedFlux<>( () -> asyncClient.listTopicsFirstPage(context), continuationToken -> asyncClient.listTopicsNextPage(continuationToken, context != null ? context : Context.NONE)); return new PagedIterable<>(pagedFlux); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return The updated queue. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. 
* @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public QueueProperties updateQueue(QueueProperties queue) { return asyncClient.updateQueue(queue).block(); } /** * Updates a queue with the given {@link QueueProperties}. The {@link QueueProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link QueueProperties * <li>{@link QueueProperties * <li>{@link QueueProperties * </li> * <li>{@link QueueProperties * </ul> * * @param queue Information about the queue to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The updated queue with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the queue quota is exceeded, or an error * occurred processing the request. * @throws NullPointerException if {@code queue} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<QueueProperties> updateQueueWithResponse(QueueProperties queue, Context context) { return asyncClient.updateQueueWithResponse(queue, context != null ? 
context : Context.NONE).block(); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return The updated rule. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public RuleProperties updateRule(String topicName, String subscriptionName, RuleProperties rule) { return asyncClient.updateRule(topicName, subscriptionName, rule).block(); } /** * Updates a rule with the given {@link RuleProperties}. The {@link RuleProperties} must be fully populated as all * of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * @param topicName The topic name under which the rule is updated. * @param subscriptionName The name of the subscription for which the rule is updated. * @param rule Information about the rule to update. 
You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return A Mono that returns the updated rule in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the rule quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link RuleProperties * @throws NullPointerException if {@code rule} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<RuleProperties> updateRuleWithResponse(String topicName, String subscriptionName, RuleProperties rule, Context context) { return asyncClient.updateRuleWithResponse(topicName, subscriptionName, rule, context != null ? context : Context.NONE).block(); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * * @return Updated subscription in addition to the HTTP response. 
* @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. * @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public SubscriptionProperties updateSubscription(SubscriptionProperties subscription) { return asyncClient.updateSubscription(subscription).block(); } /** * Updates a subscription with the given {@link SubscriptionProperties}. The {@link SubscriptionProperties} must be * fully populated as all of the properties are replaced. If a property is not set the service default value is * used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * <li>{@link SubscriptionProperties * </ul> * * @param subscription Information about the subscription to update. You must provide all the property values * that are desired on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return Updated subscription in addition to the HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the subscription quota is exceeded, or an * error occurred processing the request. 
* @throws IllegalArgumentException if {@link SubscriptionProperties * SubscriptionProperties * @throws NullPointerException if {@code subscription} is null. * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<SubscriptionProperties> updateSubscriptionWithResponse( SubscriptionProperties subscription, Context context) { return asyncClient.updateSubscriptionWithResponse(subscription, context != null ? context : Context.NONE) .block(); } /** * Updates a topic with the given {@link TopicProperties}. The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * * @return The updated topic. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public TopicProperties updateTopic(TopicProperties topic) { return asyncClient.updateTopic(topic).block(); } /** * Updates a topic with the given {@link TopicProperties}. 
The {@link TopicProperties} must be fully populated as * all of the properties are replaced. If a property is not set the service default value is used. * * The suggested flow is: * <ol> * <li>{@link * <li>Update the required elements.</li> * <li>Pass the updated description into this method.</li> * </ol> * * <p> * There are a subset of properties that can be updated. More information can be found in the links below. They are: * <ul> * <li>{@link TopicProperties * <li>{@link TopicProperties * </li> * </ul> * * @param topic Information about the topic to update. You must provide all the property values that are desired * on the updated entity. Any values not provided are set to the service default values. * @param context Additional context that is passed through the HTTP pipeline during the service call. * * @return The updated topic with its HTTP response. * @throws ClientAuthenticationException if the client's credentials do not have access to modify the * namespace. * @throws HttpResponseException If the request body was invalid, the topic quota is exceeded, or an error * occurred processing the request. * @throws IllegalArgumentException if {@link TopicProperties * string. * @throws NullPointerException if {@code topic} is null. * @see <a href="https: * @see <a href="https: */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<TopicProperties> updateTopicWithResponse(TopicProperties topic, Context context) { return asyncClient.updateTopicWithResponse(topic, context != null ? context : Context.NONE).block(); } }
We could add a null check here, but the previous implementation did not have one, so adding it now would be a change in behavior.
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream, int sizeHint) { return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write) .map(ByteBufferCollector::toByteArray); }
return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write)
public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream, int sizeHint) { return stream.collect(() -> new ByteBufferCollector(sizeHint), ByteBufferCollector::write) .map(ByteBufferCollector::toByteArray); }
class FluxUtil { /** * Checks if a type is Flux&lt;ByteBuffer&gt;. * * @param entityType the type to check * @return whether the type represents a Flux that emits ByteBuffer */ public static boolean isFluxByteBuffer(Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Flux.class)) { final Type innerType = TypeUtil.getTypeArguments(entityType)[0]; return TypeUtil.isTypeOrSubTypeOf(innerType, ByteBuffer.class); } return false; } /** * Collects ByteBuffers emitted by a Flux into a byte array. * * @param stream A stream which emits ByteBuffer instances. * @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux. * @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link * Integer */ public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream) { return stream.collect(ByteBufferCollector::new, ByteBufferCollector::write) .map(ByteBufferCollector::toByteArray); } /** * Collects ByteBuffers emitted by a Flux into a byte array. * <p> * Unlike {@link * This size hint allows for optimizations when creating the initial buffer to reduce the number of times it needs * to be resized while concatenating emitted ByteBuffers. * * @param stream A stream which emits ByteBuffer instances. * @param sizeHint A hint about the expected stream size. * @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux. * @throws IllegalArgumentException If {@code sizeHint} is equal to or less than {@code 0}. * @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link * Integer */ /** * Gets the content of the provided ByteBuffer as a byte array. This method will create a new byte array even if the * ByteBuffer can have optionally backing array. 
* * @param byteBuffer the byte buffer * @return the byte array */ public static byte[] byteBufferToArray(ByteBuffer byteBuffer) { int length = byteBuffer.remaining(); byte[] byteArray = new byte[length]; byteBuffer.get(byteArray); return byteArray; } /** * Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer} using a chunk size of 4096. * <p> * Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered * non-replayable as well. * <p> * If the passed {@link InputStream} is {@code null} {@link Flux * * @param inputStream The {@link InputStream} to convert into a {@link Flux}. * @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream. */ public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) { return toFluxByteBuffer(inputStream, 4096); } /** * Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer}. * <p> * Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered * non-replayable as well. * <p> * If the passed {@link InputStream} is {@code null} {@link Flux * * @param inputStream The {@link InputStream} to convert into a {@link Flux}. * @param chunkSize The requested size for each {@link ByteBuffer}. * @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream. * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to {@code 0}. 
*/ public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream, int chunkSize) { if (chunkSize <= 0) { return Flux.error(new IllegalArgumentException("'chunkSize' must be greater than 0.")); } if (inputStream == null) { return Flux.empty(); } return Flux.<ByteBuffer, InputStream>generate(() -> inputStream, (stream, sink) -> { byte[] buffer = new byte[chunkSize]; try { int offset = 0; while (offset < chunkSize) { int readCount = inputStream.read(buffer, offset, chunkSize - offset); if (readCount == -1) { if (offset > 0) { sink.next(ByteBuffer.wrap(buffer, 0, offset)); } sink.complete(); return stream; } offset += readCount; } sink.next(ByteBuffer.wrap(buffer)); } catch (IOException ex) { sink.error(ex); } return stream; }).filter(ByteBuffer::hasRemaining); } /** * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor * Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a * single entity of type {@code T} * <p> * If the reactor context is empty, {@link Context * </p> * * <p><strong>Code samples</strong></p> * {@codesnippet com.azure.core.implementation.util.fluxutil.withcontext} * * @param serviceCall The lambda function that makes the service call into which azure context will be passed * @param <T> The type of response returned from the service call * @return The response from service call */ public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall) { return withContext(serviceCall, Collections.emptyMap()); } /** * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor * Context} to {@link Context Azure Context}, adds the specified context attributes and calls the given lambda * function with this context and returns a single entity of type {@code T} * <p> * If the reactor context is empty, {@link Context * </p> * * @param serviceCall serviceCall The lambda function that 
makes the service call into which azure context will be * passed * @param contextAttributes The map of attributes sent by the calling method to be set on {@link Context}. * @param <T> The type of response returned from the service call * @return The response from service call */ public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall, Map<String, String> contextAttributes) { return Mono.subscriberContext() .map(context -> { final Context[] azureContext = new Context[]{Context.NONE}; if (!CoreUtils.isNullOrEmpty(contextAttributes)) { contextAttributes.forEach((key, value) -> azureContext[0] = azureContext[0].addData(key, value)); } if (!context.isEmpty()) { context.stream().forEach(entry -> azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue())); } return azureContext[0]; }) .flatMap(serviceCall); } /** * Converts the incoming content to Mono. * * @param <T> The type of the Response, which will be returned in the Mono. * @param response whose {@link Response * @return The converted {@link Mono} */ public static <T> Mono<T> toMono(Response<T> response) { return Mono.justOrEmpty(response.getValue()); } /** * Propagates a {@link RuntimeException} through the error channel of {@link Mono}. * * @param logger The {@link ClientLogger} to log the exception. * @param ex The {@link RuntimeException}. * @param <T> The return type. * @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}. */ public static <T> Mono<T> monoError(ClientLogger logger, RuntimeException ex) { return Mono.error(logger.logExceptionAsError(Exceptions.propagate(ex))); } /** * Propagates a {@link RuntimeException} through the error channel of {@link Flux}. * * @param logger The {@link ClientLogger} to log the exception. * @param ex The {@link RuntimeException}. * @param <T> The return type. * @return A {@link Flux} that terminates with error wrapping the {@link RuntimeException}. 
*/ public static <T> Flux<T> fluxError(ClientLogger logger, RuntimeException ex) { return Flux.error(logger.logExceptionAsError(Exceptions.propagate(ex))); } /** * Propagates a {@link RuntimeException} through the error channel of {@link PagedFlux}. * * @param logger The {@link ClientLogger} to log the exception. * @param ex The {@link RuntimeException}. * @param <T> The return type. * @return A {@link PagedFlux} that terminates with error wrapping the {@link RuntimeException}. */ public static <T> PagedFlux<T> pagedFluxError(ClientLogger logger, RuntimeException ex) { return new PagedFlux<>(() -> monoError(logger, ex)); } /** * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor * Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a * collection of type {@code T} * <p> * If the reactor context is empty, {@link Context * </p> * * <p><strong>Code samples</strong></p> * {@codesnippet com.azure.core.implementation.util.fluxutil.fluxcontext} * * @param serviceCall The lambda function that makes the service call into which the context will be passed * @param <T> The type of response returned from the service call * @return The response from service call */ public static <T> Flux<T> fluxContext(Function<Context, Flux<T>> serviceCall) { return Mono.subscriberContext() .map(FluxUtil::toAzureContext) .flatMapMany(serviceCall); } /** * Converts a reactor context to azure context. If the reactor context is {@code null} or empty, {@link * Context * * @param context The reactor context * @return The azure context */ private static Context toAzureContext(reactor.util.context.Context context) { final Context[] azureContext = new Context[]{Context.NONE}; if (!context.isEmpty()) { context.stream().forEach(entry -> azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue())); } return azureContext[0]; } /** * Converts an Azure context to Reactor context. 
If the Azure context is {@code null} or empty, {@link * reactor.util.context.Context * * @param context The Azure context. * @return The Reactor context. */ public static reactor.util.context.Context toReactorContext(Context context) { if (context == null) { return reactor.util.context.Context.empty(); } Map<Object, Object> contextValues = context.getValues().entrySet().stream() .filter(kvp -> kvp.getValue() != null) .collect(Collectors.toMap(Entry::getKey, Entry::getValue)); return CoreUtils.isNullOrEmpty(contextValues) ? reactor.util.context.Context.empty() : reactor.util.context.Context.of(contextValues); } /** * Writes the bytes emitted by a Flux to an AsynchronousFileChannel. * * @param content the Flux content * @param outFile the file channel * @return a Mono which performs the write operation when subscribed */ public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile) { return writeFile(content, outFile, 0); } /** * Writes the bytes emitted by a Flux to an AsynchronousFileChannel starting at the given position in the file. 
* * @param content the Flux content * @param outFile the file channel * @param position the position in the file to begin writing * @return a Mono which performs the write operation when subscribed */ public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile, long position) { return Mono.create(emitter -> content.subscribe(new Subscriber<ByteBuffer>() { volatile boolean isWriting = false; volatile boolean isCompleted = false; volatile Subscription subscription; volatile long pos = position; @Override public void onSubscribe(Subscription s) { subscription = s; s.request(1); } @Override public void onNext(ByteBuffer bytes) { isWriting = true; outFile.write(bytes, pos, null, onWriteCompleted); } final CompletionHandler<Integer, Object> onWriteCompleted = new CompletionHandler<Integer, Object>() { @Override public void completed(Integer bytesWritten, Object attachment) { isWriting = false; if (isCompleted) { emitter.success(); } pos += bytesWritten; subscription.request(1); } @Override public void failed(Throwable exc, Object attachment) { subscription.cancel(); emitter.error(exc); } }; @Override public void onError(Throwable throwable) { subscription.cancel(); emitter.error(throwable); } @Override public void onComplete() { isCompleted = true; if (!isWriting) { emitter.success(); } } })); } /** * Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file into chunks of the * given size. * * @param fileChannel The file channel. * @param chunkSize the size of file chunks to read. * @param offset The offset in the file to begin reading. * @param length The number of bytes to read from the file. * @return the Flux. */ public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) { return new FileReadFlux(fileChannel, chunkSize, offset, length); } /** * Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file. 
* * @param fileChannel The file channel. * @param offset The offset in the file to begin reading. * @param length The number of bytes to read from the file. * @return the Flux. */ public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, long offset, long length) { return readFile(fileChannel, DEFAULT_CHUNK_SIZE, offset, length); } /** * Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads the entire file. * * @param fileChannel The file channel. * @return The AsyncInputStream. */ public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel) { try { long size = fileChannel.size(); return readFile(fileChannel, DEFAULT_CHUNK_SIZE, 0, size); } catch (IOException e) { return Flux.error(new RuntimeException("Failed to read the file.", e)); } } private static final int DEFAULT_CHUNK_SIZE = 1024 * 64; private static final class FileReadFlux extends Flux<ByteBuffer> { private final AsynchronousFileChannel fileChannel; private final int chunkSize; private final long offset; private final long length; FileReadFlux(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) { this.fileChannel = fileChannel; this.chunkSize = chunkSize; this.offset = offset; this.length = length; } @Override public void subscribe(CoreSubscriber<? super ByteBuffer> actual) { FileReadSubscription subscription = new FileReadSubscription(actual, fileChannel, chunkSize, offset, length); actual.onSubscribe(subscription); } static final class FileReadSubscription implements Subscription, CompletionHandler<Integer, ByteBuffer> { private static final int NOT_SET = -1; private static final long serialVersionUID = -6831808726875304256L; private final Subscriber<? 
super ByteBuffer> subscriber; private volatile long position; private final AsynchronousFileChannel fileChannel; private final int chunkSize; private final long offset; private final long length; private volatile boolean done; private Throwable error; private volatile ByteBuffer next; private volatile boolean cancelled; volatile int wip; @SuppressWarnings("rawtypes") static final AtomicIntegerFieldUpdater<FileReadSubscription> WIP = AtomicIntegerFieldUpdater.newUpdater(FileReadSubscription.class, "wip"); volatile long requested; @SuppressWarnings("rawtypes") static final AtomicLongFieldUpdater<FileReadSubscription> REQUESTED = AtomicLongFieldUpdater.newUpdater(FileReadSubscription.class, "requested"); FileReadSubscription(Subscriber<? super ByteBuffer> subscriber, AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) { this.subscriber = subscriber; this.fileChannel = fileChannel; this.chunkSize = chunkSize; this.offset = offset; this.length = length; this.position = NOT_SET; } @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(); } } @Override public void cancel() { this.cancelled = true; } @Override public void completed(Integer bytesRead, ByteBuffer buffer) { if (!cancelled) { if (bytesRead == -1) { done = true; } else { long pos = position; int bytesWanted = Math.min(bytesRead, maxRequired(pos)); long position2 = pos + bytesWanted; position = position2; buffer.position(bytesWanted); buffer.flip(); next = buffer; if (position2 >= offset + length) { done = true; } } drain(); } } @Override public void failed(Throwable exc, ByteBuffer attachment) { if (!cancelled) { error = exc; done = true; drain(); } } private void drain() { if (WIP.getAndIncrement(this) != 0) { return; } if (position == NOT_SET) { position = offset; doRead(); } int missed = 1; while (true) { if (cancelled) { return; } if (REQUESTED.get(this) > 0) { boolean emitted = false; boolean d = done; ByteBuffer bb = next; 
if (bb != null) { next = null; subscriber.onNext(bb); emitted = true; } if (d) { if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } return; } if (emitted) { Operators.produced(REQUESTED, this, 1); doRead(); } } missed = WIP.addAndGet(this, -missed); if (missed == 0) { return; } } } private void doRead() { long pos = position; ByteBuffer innerBuf = ByteBuffer.allocate(Math.min(chunkSize, maxRequired(pos))); fileChannel.read(innerBuf, pos, innerBuf, this); } private int maxRequired(long pos) { long maxRequired = offset + length - pos; if (maxRequired <= 0) { return 0; } else { int m = (int) (maxRequired); if (m < 0) { return Integer.MAX_VALUE; } else { return m; } } } } } private FluxUtil() { } }
class FluxUtil { private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; /** * Checks if a type is Flux&lt;ByteBuffer&gt;. * * @param entityType the type to check * @return whether the type represents a Flux that emits ByteBuffer */ public static boolean isFluxByteBuffer(Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Flux.class)) { final Type innerType = TypeUtil.getTypeArguments(entityType)[0]; return TypeUtil.isTypeOrSubTypeOf(innerType, ByteBuffer.class); } return false; } /** * Collects ByteBuffers emitted by a Flux into a byte array. * * @param stream A stream which emits ByteBuffer instances. * @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux. * @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link * Integer */ public static Mono<byte[]> collectBytesInByteBufferStream(Flux<ByteBuffer> stream) { return stream.collect(ByteBufferCollector::new, ByteBufferCollector::write) .map(ByteBufferCollector::toByteArray); } /** * Collects ByteBuffers emitted by a Flux into a byte array. * <p> * Unlike {@link * This size hint allows for optimizations when creating the initial buffer to reduce the number of times it needs * to be resized while concatenating emitted ByteBuffers. * * @param stream A stream which emits ByteBuffer instances. * @param sizeHint A hint about the expected stream size. * @return A Mono which emits the concatenation of all the ByteBuffer instances given by the source Flux. * @throws IllegalArgumentException If {@code sizeHint} is equal to or less than {@code 0}. * @throws IllegalStateException If the combined size of the emitted ByteBuffers is greater than {@link * Integer */ /** * Collects ByteBuffers returned in a network response into a byte array. 
* <p> * The {@code headers} are inspected for containing an {@code Content-Length} which determines if a size hinted * collection, {@link * {@link * * @param stream A network response ByteBuffer stream. * @param headers The HTTP headers of the response. * @return A Mono which emits the collected network response ByteBuffers. * @throws NullPointerException If {@code headers} is null. * @throws IllegalStateException If the size of the network response is greater than {@link Integer */ public static Mono<byte[]> collectBytesFromNetworkResponse(Flux<ByteBuffer> stream, HttpHeaders headers) { Objects.requireNonNull(headers, "'headers' cannot be null."); String contentLengthHeader = headers.getValue("Content-Length"); if (contentLengthHeader == null) { return FluxUtil.collectBytesInByteBufferStream(stream); } else { try { int contentLength = Integer.parseInt(contentLengthHeader); if (contentLength > 0) { return FluxUtil.collectBytesInByteBufferStream(stream, contentLength); } else { return Mono.just(EMPTY_BYTE_ARRAY); } } catch (NumberFormatException ex) { return FluxUtil.collectBytesInByteBufferStream(stream); } } } /** * Gets the content of the provided ByteBuffer as a byte array. This method will create a new byte array even if the * ByteBuffer can have optionally backing array. * * @param byteBuffer the byte buffer * @return the byte array */ public static byte[] byteBufferToArray(ByteBuffer byteBuffer) { int length = byteBuffer.remaining(); byte[] byteArray = new byte[length]; byteBuffer.get(byteArray); return byteArray; } /** * Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer} using a chunk size of 4096. * <p> * Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered * non-replayable as well. * <p> * If the passed {@link InputStream} is {@code null} {@link Flux * * @param inputStream The {@link InputStream} to convert into a {@link Flux}. 
* @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream. */ public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream) { return toFluxByteBuffer(inputStream, 4096); } /** * Converts an {@link InputStream} into a {@link Flux} of {@link ByteBuffer}. * <p> * Given that {@link InputStream} is not guaranteed to be replayable the returned {@link Flux} should be considered * non-replayable as well. * <p> * If the passed {@link InputStream} is {@code null} {@link Flux * * @param inputStream The {@link InputStream} to convert into a {@link Flux}. * @param chunkSize The requested size for each {@link ByteBuffer}. * @return A {@link Flux} of {@link ByteBuffer ByteBuffers} that contains the contents of the stream. * @throws IllegalArgumentException If {@code chunkSize} is less than or equal to {@code 0}. */ public static Flux<ByteBuffer> toFluxByteBuffer(InputStream inputStream, int chunkSize) { if (chunkSize <= 0) { return Flux.error(new IllegalArgumentException("'chunkSize' must be greater than 0.")); } if (inputStream == null) { return Flux.empty(); } return Flux.<ByteBuffer, InputStream>generate(() -> inputStream, (stream, sink) -> { byte[] buffer = new byte[chunkSize]; try { int offset = 0; while (offset < chunkSize) { int readCount = inputStream.read(buffer, offset, chunkSize - offset); if (readCount == -1) { if (offset > 0) { sink.next(ByteBuffer.wrap(buffer, 0, offset)); } sink.complete(); return stream; } offset += readCount; } sink.next(ByteBuffer.wrap(buffer)); } catch (IOException ex) { sink.error(ex); } return stream; }).filter(ByteBuffer::hasRemaining); } /** * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor * Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a * single entity of type {@code T} * <p> * If the reactor context is empty, {@link Context * </p> * * <p><strong>Code 
samples</strong></p> * {@codesnippet com.azure.core.implementation.util.fluxutil.withcontext} * * @param serviceCall The lambda function that makes the service call into which azure context will be passed * @param <T> The type of response returned from the service call * @return The response from service call */ public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall) { return withContext(serviceCall, Collections.emptyMap()); } /** * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor * Context} to {@link Context Azure Context}, adds the specified context attributes and calls the given lambda * function with this context and returns a single entity of type {@code T} * <p> * If the reactor context is empty, {@link Context * </p> * * @param serviceCall serviceCall The lambda function that makes the service call into which azure context will be * passed * @param contextAttributes The map of attributes sent by the calling method to be set on {@link Context}. * @param <T> The type of response returned from the service call * @return The response from service call */ public static <T> Mono<T> withContext(Function<Context, Mono<T>> serviceCall, Map<String, String> contextAttributes) { return Mono.subscriberContext() .map(context -> { final Context[] azureContext = new Context[]{Context.NONE}; if (!CoreUtils.isNullOrEmpty(contextAttributes)) { contextAttributes.forEach((key, value) -> azureContext[0] = azureContext[0].addData(key, value)); } if (!context.isEmpty()) { context.stream().forEach(entry -> azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue())); } return azureContext[0]; }) .flatMap(serviceCall); } /** * Converts the incoming content to Mono. * * @param <T> The type of the Response, which will be returned in the Mono. 
* @param response whose {@link Response * @return The converted {@link Mono} */ public static <T> Mono<T> toMono(Response<T> response) { return Mono.justOrEmpty(response.getValue()); } /** * Propagates a {@link RuntimeException} through the error channel of {@link Mono}. * * @param logger The {@link ClientLogger} to log the exception. * @param ex The {@link RuntimeException}. * @param <T> The return type. * @return A {@link Mono} that terminates with error wrapping the {@link RuntimeException}. */ public static <T> Mono<T> monoError(ClientLogger logger, RuntimeException ex) { return Mono.error(logger.logExceptionAsError(Exceptions.propagate(ex))); } /** * Propagates a {@link RuntimeException} through the error channel of {@link Flux}. * * @param logger The {@link ClientLogger} to log the exception. * @param ex The {@link RuntimeException}. * @param <T> The return type. * @return A {@link Flux} that terminates with error wrapping the {@link RuntimeException}. */ public static <T> Flux<T> fluxError(ClientLogger logger, RuntimeException ex) { return Flux.error(logger.logExceptionAsError(Exceptions.propagate(ex))); } /** * Propagates a {@link RuntimeException} through the error channel of {@link PagedFlux}. * * @param logger The {@link ClientLogger} to log the exception. * @param ex The {@link RuntimeException}. * @param <T> The return type. * @return A {@link PagedFlux} that terminates with error wrapping the {@link RuntimeException}. 
*/ public static <T> PagedFlux<T> pagedFluxError(ClientLogger logger, RuntimeException ex) { return new PagedFlux<>(() -> monoError(logger, ex)); } /** * This method converts the incoming {@code subscriberContext} from {@link reactor.util.context.Context Reactor * Context} to {@link Context Azure Context} and calls the given lambda function with this context and returns a * collection of type {@code T} * <p> * If the reactor context is empty, {@link Context * </p> * * <p><strong>Code samples</strong></p> * {@codesnippet com.azure.core.implementation.util.fluxutil.fluxcontext} * * @param serviceCall The lambda function that makes the service call into which the context will be passed * @param <T> The type of response returned from the service call * @return The response from service call */ public static <T> Flux<T> fluxContext(Function<Context, Flux<T>> serviceCall) { return Mono.subscriberContext() .map(FluxUtil::toAzureContext) .flatMapMany(serviceCall); } /** * Converts a reactor context to azure context. If the reactor context is {@code null} or empty, {@link * Context * * @param context The reactor context * @return The azure context */ private static Context toAzureContext(reactor.util.context.Context context) { final Context[] azureContext = new Context[]{Context.NONE}; if (!context.isEmpty()) { context.stream().forEach(entry -> azureContext[0] = azureContext[0].addData(entry.getKey(), entry.getValue())); } return azureContext[0]; } /** * Converts an Azure context to Reactor context. If the Azure context is {@code null} or empty, {@link * reactor.util.context.Context * * @param context The Azure context. * @return The Reactor context. 
*/ public static reactor.util.context.Context toReactorContext(Context context) { if (context == null) { return reactor.util.context.Context.empty(); } Map<Object, Object> contextValues = context.getValues().entrySet().stream() .filter(kvp -> kvp.getValue() != null) .collect(Collectors.toMap(Entry::getKey, Entry::getValue)); return CoreUtils.isNullOrEmpty(contextValues) ? reactor.util.context.Context.empty() : reactor.util.context.Context.of(contextValues); } /** * Writes the bytes emitted by a Flux to an AsynchronousFileChannel. * * @param content the Flux content * @param outFile the file channel * @return a Mono which performs the write operation when subscribed */ public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile) { return writeFile(content, outFile, 0); } /** * Writes the bytes emitted by a Flux to an AsynchronousFileChannel starting at the given position in the file. * * @param content the Flux content * @param outFile the file channel * @param position the position in the file to begin writing * @return a Mono which performs the write operation when subscribed */ public static Mono<Void> writeFile(Flux<ByteBuffer> content, AsynchronousFileChannel outFile, long position) { return Mono.create(emitter -> content.subscribe(new Subscriber<ByteBuffer>() { volatile boolean isWriting = false; volatile boolean isCompleted = false; volatile Subscription subscription; volatile long pos = position; @Override public void onSubscribe(Subscription s) { subscription = s; s.request(1); } @Override public void onNext(ByteBuffer bytes) { isWriting = true; outFile.write(bytes, pos, null, onWriteCompleted); } final CompletionHandler<Integer, Object> onWriteCompleted = new CompletionHandler<Integer, Object>() { @Override public void completed(Integer bytesWritten, Object attachment) { isWriting = false; if (isCompleted) { emitter.success(); } pos += bytesWritten; subscription.request(1); } @Override public void failed(Throwable exc, 
Object attachment) { subscription.cancel(); emitter.error(exc); } }; @Override public void onError(Throwable throwable) { subscription.cancel(); emitter.error(throwable); } @Override public void onComplete() { isCompleted = true; if (!isWriting) { emitter.success(); } } })); } /** * Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file into chunks of the * given size. * * @param fileChannel The file channel. * @param chunkSize the size of file chunks to read. * @param offset The offset in the file to begin reading. * @param length The number of bytes to read from the file. * @return the Flux. */ public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) { return new FileReadFlux(fileChannel, chunkSize, offset, length); } /** * Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads part of a file. * * @param fileChannel The file channel. * @param offset The offset in the file to begin reading. * @param length The number of bytes to read from the file. * @return the Flux. */ public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel, long offset, long length) { return readFile(fileChannel, DEFAULT_CHUNK_SIZE, offset, length); } /** * Creates a {@link Flux} from an {@link AsynchronousFileChannel} which reads the entire file. * * @param fileChannel The file channel. * @return The AsyncInputStream. 
*/ public static Flux<ByteBuffer> readFile(AsynchronousFileChannel fileChannel) { try { long size = fileChannel.size(); return readFile(fileChannel, DEFAULT_CHUNK_SIZE, 0, size); } catch (IOException e) { return Flux.error(new RuntimeException("Failed to read the file.", e)); } } private static final int DEFAULT_CHUNK_SIZE = 1024 * 64; private static final class FileReadFlux extends Flux<ByteBuffer> { private final AsynchronousFileChannel fileChannel; private final int chunkSize; private final long offset; private final long length; FileReadFlux(AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) { this.fileChannel = fileChannel; this.chunkSize = chunkSize; this.offset = offset; this.length = length; } @Override public void subscribe(CoreSubscriber<? super ByteBuffer> actual) { FileReadSubscription subscription = new FileReadSubscription(actual, fileChannel, chunkSize, offset, length); actual.onSubscribe(subscription); } static final class FileReadSubscription implements Subscription, CompletionHandler<Integer, ByteBuffer> { private static final int NOT_SET = -1; private static final long serialVersionUID = -6831808726875304256L; private final Subscriber<? super ByteBuffer> subscriber; private volatile long position; private final AsynchronousFileChannel fileChannel; private final int chunkSize; private final long offset; private final long length; private volatile boolean done; private Throwable error; private volatile ByteBuffer next; private volatile boolean cancelled; volatile int wip; @SuppressWarnings("rawtypes") static final AtomicIntegerFieldUpdater<FileReadSubscription> WIP = AtomicIntegerFieldUpdater.newUpdater(FileReadSubscription.class, "wip"); volatile long requested; @SuppressWarnings("rawtypes") static final AtomicLongFieldUpdater<FileReadSubscription> REQUESTED = AtomicLongFieldUpdater.newUpdater(FileReadSubscription.class, "requested"); FileReadSubscription(Subscriber<? 
super ByteBuffer> subscriber, AsynchronousFileChannel fileChannel, int chunkSize, long offset, long length) { this.subscriber = subscriber; this.fileChannel = fileChannel; this.chunkSize = chunkSize; this.offset = offset; this.length = length; this.position = NOT_SET; } @Override public void request(long n) { if (Operators.validate(n)) { Operators.addCap(REQUESTED, this, n); drain(); } } @Override public void cancel() { this.cancelled = true; } @Override public void completed(Integer bytesRead, ByteBuffer buffer) { if (!cancelled) { if (bytesRead == -1) { done = true; } else { long pos = position; int bytesWanted = Math.min(bytesRead, maxRequired(pos)); long position2 = pos + bytesWanted; position = position2; buffer.position(bytesWanted); buffer.flip(); next = buffer; if (position2 >= offset + length) { done = true; } } drain(); } } @Override public void failed(Throwable exc, ByteBuffer attachment) { if (!cancelled) { error = exc; done = true; drain(); } } private void drain() { if (WIP.getAndIncrement(this) != 0) { return; } if (position == NOT_SET) { position = offset; doRead(); } int missed = 1; while (true) { if (cancelled) { return; } if (REQUESTED.get(this) > 0) { boolean emitted = false; boolean d = done; ByteBuffer bb = next; if (bb != null) { next = null; subscriber.onNext(bb); emitted = true; } if (d) { if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } return; } if (emitted) { Operators.produced(REQUESTED, this, 1); doRead(); } } missed = WIP.addAndGet(this, -missed); if (missed == 0) { return; } } } private void doRead() { long pos = position; ByteBuffer innerBuf = ByteBuffer.allocate(Math.min(chunkSize, maxRequired(pos))); fileChannel.read(innerBuf, pos, innerBuf, this); } private int maxRequired(long pos) { long maxRequired = offset + length - pos; if (maxRequired <= 0) { return 0; } else { int m = (int) (maxRequired); if (m < 0) { return Integer.MAX_VALUE; } else { return m; } } } } } private FluxUtil() { } }
we may refactor this with @After
public void testSignIn() { try { aadb2cSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadb2cSeleniumITHelper.signIn(AAD_B2C_SIGN_UP_OR_SIGN_IN); String name = aadb2cSeleniumITHelper.getName(); String userFlowName = aadb2cSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } finally { aadb2cSeleniumITHelperDestroy(); } }
aadb2cSeleniumITHelperDestroy();
public void testSignIn() { String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadb2cSeleniumITHelper; @Test @Test public void testProfileEdit() { try { aadb2cSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadb2cSeleniumITHelper.signIn(AAD_B2C_SIGN_UP_OR_SIGN_IN); aadb2cSeleniumITHelper.profileEditJobTitle(JOB_TITLE_A_WORKER); String currentJobTitle = aadb2cSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadb2cSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadb2cSeleniumITHelper.getName(); String jobTitle = aadb2cSeleniumITHelper.getJobTitle(); String userFlowName = aadb2cSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } finally { aadb2cSeleniumITHelperDestroy(); } } @Test public void testLogOut() { try { aadb2cSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadb2cSeleniumITHelper.signIn(AAD_B2C_SIGN_UP_OR_SIGN_IN); aadb2cSeleniumITHelper.logout(); String signInButtonText = aadb2cSeleniumITHelper.getSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); aadb2cSeleniumITHelper.destroy(); } finally { aadb2cSeleniumITHelperDestroy(); } } private void aadb2cSeleniumITHelperDestroy() { if (aadb2cSeleniumITHelper != null) { aadb2cSeleniumITHelper.destroy(); } } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) 
throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Before public void initAndSignIn() { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, createDefaultProperteis()); aadB2CSeleniumITHelper.logIn(); } @Test @Test public void testProfileEdit() { aadB2CSeleniumITHelper.profileEditJobTitle(JOB_TITLE_A_WORKER); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AAD_B2C_PROFILE_EDIT, userFlowName); } @Test public void testLogOut() { aadB2CSeleniumITHelper.logout(); String signInButtonText = aadB2CSeleniumITHelper.getSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); } @After public void destroy() { aadB2CSeleniumITHelper.destroy(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { 
final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
we may refactor with @After
public AADB2CSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { try { userEmail = AADB2CTestUtils.AAD_B2C_USER_EMAIL; userPassword = AADB2CTestUtils.AAD_B2C_USER_PASSWORD; app = new AppRunner(appClass); DEFAULT_PROPERTIES.forEach(app::property); properties.forEach(app::property); setDriver(); this.app.start(); } catch (Exception e) { LOGGER.error("AADB2CSeleniumITHelper initialization produces an exception. ", e); app.close(); } }
app.close();
public AADB2CSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { super(appClass, properties); userEmail = AAD_B2C_USER_EMAIL; userPassword = AAD_B2C_USER_PASSWORD; }
class AADB2CSeleniumITHelper extends SeleniumITHelper { private String userEmail; private String userPassword; private static final Map<String, String> DEFAULT_PROPERTIES = new HashMap<>(); private static final Logger LOGGER = LoggerFactory.getLogger(AADB2CSeleniumITHelper.class); static { DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.tenant", AADB2CTestUtils.AAD_B2C_TENANT); DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.client-id", AADB2CTestUtils.AAD_B2C_CLIENT_ID); DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.client-secret", AADB2CTestUtils.AAD_B2C_CLIENT_SECRET); DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.reply-url", AADB2CTestUtils.AAD_B2C_REPLY_URL); DEFAULT_PROPERTIES .put("azure.activedirectory.b2c.user-flows.sign-up-or-sign-in", AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); DEFAULT_PROPERTIES .put("azure.activedirectory.b2c.user-flows.profile-edit", AADB2CTestUtils.AAD_B2C_PROFILE_EDIT); } public void signIn(String userFlowName) { driver.get(app.root()); wait.until(ExpectedConditions .elementToBeClickable(By.cssSelector("a[href='/oauth2/authorization/" + userFlowName + "']"))).click(); wait.until(presenceOfElementLocated(By.id("email"))).sendKeys(userEmail); wait.until(presenceOfElementLocated(By.id("password"))).sendKeys(userPassword); wait.until(presenceOfElementLocated(By.cssSelector("button[type='submit']"))).click(); manualRedirection(); } public void profileEditJobTitle(String newJobTitle) { wait.until(presenceOfElementLocated(By.id("profileEdit"))).click(); changeJobTile(newJobTitle); driver.findElement(By.cssSelector("button[type='submit']")).click(); manualRedirection(); } public void logout() { wait.until(presenceOfElementLocated(By.id("logout"))).click(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).submit(); manualRedirection(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector( "a[href='/oauth2/authorization/" + AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN + 
"']"))).click(); } private void manualRedirection() { wait.until(ExpectedConditions.urlMatches("^http: String currentUrl = driver.getCurrentUrl(); String newCurrentUrl = currentUrl.replaceFirst("http: driver.get(newCurrentUrl); } public void changeJobTile(String newValue) { String elementId = "jobTitle"; wait.until(presenceOfElementLocated(By.id(elementId))).clear(); driver.findElement(By.id(elementId)).sendKeys(newValue); } public String getJobTitle() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[10]")) .findElement(By.xpath("th[2]")) .getText(); } public String getName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[2]")) .findElement(By.xpath("th[2]")) .getText(); } public String getUserFlowName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[last()]")) .findElement(By.xpath("th[2]")) .getText(); } public String getSignInButtonText() { return wait.until(presenceOfElementLocated(By.cssSelector("button[type='submit']"))).getText(); } }
class AADB2CSeleniumITHelper extends SeleniumITHelper { private String userEmail; private String userPassword; public static Map<String, String> createDefaultProperteis() { Map<String, String> defaultProperteis = new HashMap<>(); defaultProperteis.put("azure.activedirectory.b2c.tenant", AAD_B2C_TENANT); defaultProperteis.put("azure.activedirectory.b2c.client-id", AAD_B2C_CLIENT_ID); defaultProperteis.put("azure.activedirectory.b2c.client-secret", AAD_B2C_CLIENT_SECRET); defaultProperteis.put("azure.activedirectory.b2c.reply-url", AAD_B2C_REPLY_URL); defaultProperteis .put("azure.activedirectory.b2c.user-flows.profile-edit", AAD_B2C_PROFILE_EDIT); defaultProperteis .put("azure.activedirectory.b2c.user-flows.sign-up-or-sign-in", AAD_B2C_SIGN_UP_OR_SIGN_IN); return defaultProperteis; } public void logIn() { driver.get(app.root()); wait.until(presenceOfElementLocated(By.id("email"))).sendKeys(userEmail); wait.until(presenceOfElementLocated(By.id("password"))).sendKeys(userPassword); wait.until(presenceOfElementLocated(By.cssSelector("button[type='submit']"))).sendKeys(Keys.ENTER); manualRedirection(); } public void profileEditJobTitle(String newJobTitle) { wait.until(presenceOfElementLocated(By.id("profileEdit"))).click(); changeJobTile(newJobTitle); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); manualRedirection(); } public void logout() { wait.until(presenceOfElementLocated(By.id("logout"))).click(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).submit(); manualRedirection(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector( "a[href='/oauth2/authorization/" + AAD_B2C_SIGN_UP_OR_SIGN_IN + "']"))).click(); } private void manualRedirection() { wait.until(ExpectedConditions.urlMatches("^http: String currentUrl = driver.getCurrentUrl(); String newCurrentUrl = currentUrl.replaceFirst("http: driver.get(newCurrentUrl); } public void changeJobTile(String 
newValue) { String elementId = "jobTitle"; wait.until(presenceOfElementLocated(By.id(elementId))).clear(); wait.until(presenceOfElementLocated(By.id(elementId))).sendKeys(newValue); } public String getJobTitle() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[10]")) .findElement(By.xpath("th[2]")) .getText(); } public String getName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[2]")) .findElement(By.xpath("th[2]")) .getText(); } public String getUserFlowName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[last()]")) .findElement(By.xpath("th[2]")) .getText(); } public String getSignInButtonText() { return wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).getText(); } }
do we need this here?
public void testLogOut() { aadb2cSeleniumITHelper.logout(); String signInButtonText = aadb2cSeleniumITHelper.getSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); aadb2cSeleniumITHelper.destroy(); }
aadb2cSeleniumITHelper.destroy();
public void testLogOut() { aadB2CSeleniumITHelper.logout(); String signInButtonText = aadB2CSeleniumITHelper.getSignInButtonText(); Assert.assertEquals("Sign in", signInButtonText); }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadb2cSeleniumITHelper; @Before public void aadb2cSeleniumITHelperInitAndSignIn(){ aadb2cSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, Collections.emptyMap()); aadb2cSeleniumITHelper.setDriver(); aadb2cSeleniumITHelper.appInit(); aadb2cSeleniumITHelper.signIn(); } @Test public void testSignIn() { String name = aadb2cSeleniumITHelper.getName(); String userFlowName = aadb2cSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test public void testProfileEdit() { aadb2cSeleniumITHelper.profileEditJobTitle(JOB_TITLE_A_WORKER); String currentJobTitle = aadb2cSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadb2cSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadb2cSeleniumITHelper.getName(); String jobTitle = aadb2cSeleniumITHelper.getJobTitle(); String userFlowName = aadb2cSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AADB2CTestUtils.AAD_B2C_PROFILE_EDIT, userFlowName); } @Test @After public void aadb2cSeleniumITHelperDestroy() { if (aadb2cSeleniumITHelper != null) { aadb2cSeleniumITHelper.destroy(); } } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() 
.apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
class AADB2CIT { private final String JOB_TITLE_A_WORKER = "a worker"; private final String JOB_TITLE_WORKER = "worker"; private AADB2CSeleniumITHelper aadB2CSeleniumITHelper; @Before public void initAndSignIn() { aadB2CSeleniumITHelper = new AADB2CSeleniumITHelper(DumbApp.class, createDefaultProperteis()); aadB2CSeleniumITHelper.logIn(); } @Test public void testSignIn() { String name = aadB2CSeleniumITHelper.getName(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(userFlowName); Assert.assertEquals(AAD_B2C_SIGN_UP_OR_SIGN_IN, userFlowName); } @Test public void testProfileEdit() { aadB2CSeleniumITHelper.profileEditJobTitle(JOB_TITLE_A_WORKER); String currentJobTitle = aadB2CSeleniumITHelper.getJobTitle(); String newJobTitle = JOB_TITLE_A_WORKER.equals(currentJobTitle) ? JOB_TITLE_WORKER : JOB_TITLE_A_WORKER; aadB2CSeleniumITHelper.profileEditJobTitle(newJobTitle); String name = aadB2CSeleniumITHelper.getName(); String jobTitle = aadB2CSeleniumITHelper.getJobTitle(); String userFlowName = aadB2CSeleniumITHelper.getUserFlowName(); Assert.assertNotNull(name); Assert.assertNotNull(jobTitle); Assert.assertEquals(newJobTitle, jobTitle); Assert.assertEquals(AAD_B2C_PROFILE_EDIT, userFlowName); } @Test @After public void destroy() { aadB2CSeleniumITHelper.destroy(); } @EnableWebSecurity @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @Controller public static class DumbApp extends WebSecurityConfigurerAdapter { private final AADB2COidcLoginConfigurer configurer; public DumbApp(AADB2COidcLoginConfigurer configurer) { this.configurer = configurer; } @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest() .authenticated() .and() .apply(configurer); } @GetMapping(value = "/") public String index(Model model, OAuth2AuthenticationToken token) { initializeModel(model, token); return "index"; } private void 
initializeModel(Model model, OAuth2AuthenticationToken token) { if (token != null) { final OAuth2User user = token.getPrincipal(); model.addAttribute("grant_type", user.getAuthorities()); model.addAllAttributes(user.getAttributes()); } } } }
If we use `this` we should use it for `app` too.
public void destroy() { if (driver != null) { this.driver.quit(); } if (app != null) { app.close(); } }
app.close();
public void destroy() { driver.quit(); app.close(); }
class SeleniumITHelper { protected AppRunner app; protected WebDriver driver; protected WebDriverWait wait; protected Class<?> appClass; protected Map<String, String> properties = Collections.emptyMap(); static { init(); } private static void init() { final String chromedriverLinux = "chromedriver_linux64"; final String chromedriverWin32 = "chromedriver_win32.exe"; final String chromedriverMac = "chromedriver_mac64"; String classpath = SeleniumITHelper.class.getProtectionDomain().getCodeSource().getLocation().getPath(); final String directory = classpath + "driver/"; String osName = System.getProperty("os.name").toLowerCase(); Process process = null; try { File dir = new File(directory); if (Pattern.matches("linux.*", osName)) { process = Runtime.getRuntime().exec("chmod +x " + chromedriverLinux, null, dir); process.waitFor(); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, directory + chromedriverLinux); } else if (Pattern.matches("windows.*", osName)) { System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, directory + chromedriverWin32); } else if (Pattern.matches("mac.*", osName)) { process = Runtime.getRuntime().exec("chmod +x " + chromedriverMac, null, dir); process.waitFor(); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, directory + chromedriverMac); } else { throw new IllegalStateException("Unrecognized osName. osName = " + System.getProperty("os.name")); } } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } finally { if (process != null) { process.destroyForcibly(); } } } public void setDriver() { if (driver == null) { ChromeOptions options = new ChromeOptions(); options.addArguments("--headless"); options.addArguments("--incognito", "--no-sandbox", "--disable-dev-shm-usage"); this.driver = new ChromeDriver(options); wait = new WebDriverWait(driver, 10); } } public abstract void appInit(); }
class SeleniumITHelper { protected AppRunner app; protected WebDriver driver; protected WebDriverWait wait; private final static String tempDirPath = System.getProperty("java.io.tmpdir") + File.separator + UUID.randomUUID(); static { initChromeDriver(); deleteChromeDriverFile(); } public SeleniumITHelper(Class<?> appClass, Map<String, String> properties) { createDriver(); createAppRunner(appClass, properties); } private static void initChromeDriver() { final String chromedriverLinux = "chromedriver_linux64"; final String chromedriverWin32 = "chromedriver_win32.exe"; final String chromedriverMac = "chromedriver_mac64"; String osName = System.getProperty("os.name").toLowerCase(); Process process = null; File dir; try { if (Pattern.matches("linux.*", osName)) { dir = copyChromeDriverFile(chromedriverLinux); process = Runtime.getRuntime().exec("chmod +x " + chromedriverLinux, null, dir.getParentFile()); process.waitFor(); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, dir.getPath()); } else if (Pattern.matches("windows.*", osName)) { dir = copyChromeDriverFile(chromedriverWin32); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, dir.getPath()); } else if (Pattern.matches("mac.*", osName)) { dir = copyChromeDriverFile(chromedriverMac); process = Runtime.getRuntime().exec("chmod +x " + chromedriverMac, null, dir.getParentFile()); process.waitFor(); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, dir.getPath()); } else { throw new IllegalStateException("Unrecognized osName. 
osName = " + System.getProperty("os.name")); } } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } finally { if (process != null) { process.destroyForcibly(); } } } protected void createDriver() { if (driver == null) { ChromeOptions options = new ChromeOptions(); options.addArguments("--headless"); options.addArguments("--incognito", "--no-sandbox", "--disable-dev-shm-usage"); driver = new ChromeDriver(options); wait = new WebDriverWait(driver, 10); } } protected void createAppRunner(Class<?> appClass, Map<String, String> properties) { app = new AppRunner(appClass); properties.forEach(app::property); app.start(); } private static File copyChromeDriverFile(String chromeDriverName) throws IOException { InputStream resourceAsStream = SeleniumITHelper.class.getClassLoader() .getResourceAsStream("driver/" + chromeDriverName); File dest = new File(tempDirPath + File.separator + chromeDriverName); FileUtils.copyInputStreamToFile(resourceAsStream, dest); return dest; } private static void deleteChromeDriverFile() { File targetFile = new File(tempDirPath); try { FileUtils.forceDeleteOnExit(targetFile); } catch (IOException e) { e.printStackTrace(); } } /** * Manually invoke destroy to complete resource release. */ }
If there are no further references of appclass and properties in this class, we should not keep them as fields.
public AADSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { username = AAD_USER_NAME_1; password = AAD_USER_PASSWORD_1; this.appClass = appClass; this.properties = properties; createDriver(); createAppRunner(); }
this.appClass = appClass;
public AADSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { super(appClass, properties); username = AAD_USER_NAME_1; password = AAD_USER_PASSWORD_1; }
class AADSeleniumITHelper extends SeleniumITHelper { private String username; private String password; static { DEFAULT_PROPERTIES.put("azure.activedirectory.tenant-id", AAD_TENANT_ID_1); DEFAULT_PROPERTIES.put("azure.activedirectory.client-id", AAD_MULTI_TENANT_CLIENT_ID); DEFAULT_PROPERTIES.put("azure.activedirectory.client-secret", AAD_MULTI_TENANT_CLIENT_SECRET); DEFAULT_PROPERTIES.put("azure.activedirectory.user-group.allowed-groups", "group1"); DEFAULT_PROPERTIES.put("azure.activedirectory.post-logout-redirect-uri", "http: } public void login() { driver.get(app.root() + "oauth2/authorization/azure"); wait.until(presenceOfElementLocated(By.name("loginfmt"))).sendKeys(username + Keys.ENTER); wait.until(presenceOfElementLocated(By.name("passwd"))).sendKeys(password + Keys.ENTER); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("input[type='submit']"))).click(); } public String httpGet(String endpoint) { driver.get((app.root() + endpoint)); return wait.until(presenceOfElementLocated(By.tagName("body"))).getText(); } public void logoutTest() { driver.get(app.root() + "logout"); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); String cssSelector = "div[data-test-id='" + username + "']"; wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector(cssSelector))).click(); String id = wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("div[tabindex='0']"))) .getAttribute("data-test-id"); Assert.assertEquals(username, id); } }
class AADSeleniumITHelper extends SeleniumITHelper { private String username; private String password; public static Map<String, String> createDefaultProperties() { Map<String, String> defaultProperties = new HashMap<>(); defaultProperties.put("azure.activedirectory.tenant-id", AAD_TENANT_ID_1); defaultProperties.put("azure.activedirectory.client-id", AAD_SINGLE_TENANT_CLIENT_ID); defaultProperties.put("azure.activedirectory.client-secret", AAD_SINGLE_TENANT_CLIENT_SECRET); defaultProperties.put("azure.activedirectory.user-group.allowed-groups", "group1"); defaultProperties.put("azure.activedirectory.post-logout-redirect-uri", "http: return defaultProperties; } public void logIn() { driver.get(app.root() + "oauth2/authorization/azure"); wait.until(ExpectedConditions.visibilityOfElementLocated(By.name("loginfmt"))).sendKeys(username + Keys.ENTER); wait.until(ExpectedConditions.visibilityOfElementLocated(By.name("passwd"))).sendKeys(password + Keys.ENTER); wait.until(ExpectedConditions.visibilityOfElementLocated(By.cssSelector("input[type='submit']"))).click(); } public String httpGet(String endpoint) { driver.get((app.root() + endpoint)); return wait.until(presenceOfElementLocated(By.tagName("body"))).getText(); } public void logoutTest() { driver.get(app.root() + "logout"); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); String cssSelector = "div[data-test-id='" + username + "']"; wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector(cssSelector))).click(); String id = wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("div[tabindex='0']"))) .getAttribute("data-test-id"); Assert.assertEquals(username, id); } }
If there are no further references of appclass and properties in this class, we should not keep them as fields.
public AADB2CSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { userEmail = AADB2CTestUtils.AAD_B2C_USER_EMAIL; userPassword = AADB2CTestUtils.AAD_B2C_USER_PASSWORD; this.appClass = appClass; this.properties = properties; createDriver(); createAppRunner(); }
this.appClass = appClass;
public AADB2CSeleniumITHelper(Class<?> appClass, Map<String, String> properties) { super(appClass, properties); userEmail = AAD_B2C_USER_EMAIL; userPassword = AAD_B2C_USER_PASSWORD; }
class AADB2CSeleniumITHelper extends SeleniumITHelper { private String userEmail; private String userPassword; static { DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.tenant", AADB2CTestUtils.AAD_B2C_TENANT); DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.client-id", AADB2CTestUtils.AAD_B2C_CLIENT_ID); DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.client-secret", AADB2CTestUtils.AAD_B2C_CLIENT_SECRET); DEFAULT_PROPERTIES.put("azure.activedirectory.b2c.reply-url", AADB2CTestUtils.AAD_B2C_REPLY_URL); DEFAULT_PROPERTIES .put("azure.activedirectory.b2c.user-flows.sign-up-or-sign-in", AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN); DEFAULT_PROPERTIES .put("azure.activedirectory.b2c.user-flows.profile-edit", AADB2CTestUtils.AAD_B2C_PROFILE_EDIT); } public void signIn() { driver.get(app.root()); wait.until(presenceOfElementLocated(By.id("email"))).sendKeys(userEmail); wait.until(presenceOfElementLocated(By.id("password"))).sendKeys(userPassword); wait.until(ExpectedConditions.elementToBeClickable(By.id("next"))).click(); manualRedirection(); } public void profileEditJobTitle(String newJobTitle) { wait.until(presenceOfElementLocated(By.id("profileEdit"))).click(); changeJobTile(newJobTitle); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); manualRedirection(); } public void logout() { wait.until(presenceOfElementLocated(By.id("logout"))).click(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).submit(); manualRedirection(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector( "a[href='/oauth2/authorization/" + AADB2CTestUtils.AAD_B2C_SIGN_UP_OR_SIGN_IN + "']"))).click(); } private void manualRedirection() { wait.until(ExpectedConditions.urlMatches("^http: String currentUrl = driver.getCurrentUrl(); String newCurrentUrl = currentUrl.replaceFirst("http: driver.get(newCurrentUrl); } public void changeJobTile(String newValue) { String elementId = "jobTitle"; 
wait.until(presenceOfElementLocated(By.id(elementId))).clear(); wait.until(presenceOfElementLocated(By.id(elementId))).sendKeys(newValue); } public String getJobTitle() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[10]")) .findElement(By.xpath("th[2]")) .getText(); } public String getName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[2]")) .findElement(By.xpath("th[2]")) .getText(); } public String getUserFlowName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[last()]")) .findElement(By.xpath("th[2]")) .getText(); } public String getSignInButtonText() { return wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).getText(); } }
class AADB2CSeleniumITHelper extends SeleniumITHelper { private String userEmail; private String userPassword; public static Map<String, String> createDefaultProperteis() { Map<String, String> defaultProperteis = new HashMap<>(); defaultProperteis.put("azure.activedirectory.b2c.tenant", AAD_B2C_TENANT); defaultProperteis.put("azure.activedirectory.b2c.client-id", AAD_B2C_CLIENT_ID); defaultProperteis.put("azure.activedirectory.b2c.client-secret", AAD_B2C_CLIENT_SECRET); defaultProperteis.put("azure.activedirectory.b2c.reply-url", AAD_B2C_REPLY_URL); defaultProperteis .put("azure.activedirectory.b2c.user-flows.profile-edit", AAD_B2C_PROFILE_EDIT); defaultProperteis .put("azure.activedirectory.b2c.user-flows.sign-up-or-sign-in", AAD_B2C_SIGN_UP_OR_SIGN_IN); return defaultProperteis; } public void logIn() { driver.get(app.root()); wait.until(presenceOfElementLocated(By.id("email"))).sendKeys(userEmail); wait.until(presenceOfElementLocated(By.id("password"))).sendKeys(userPassword); wait.until(presenceOfElementLocated(By.cssSelector("button[type='submit']"))).sendKeys(Keys.ENTER); manualRedirection(); } public void profileEditJobTitle(String newJobTitle) { wait.until(presenceOfElementLocated(By.id("profileEdit"))).click(); changeJobTile(newJobTitle); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).click(); manualRedirection(); } public void logout() { wait.until(presenceOfElementLocated(By.id("logout"))).click(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).submit(); manualRedirection(); wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector( "a[href='/oauth2/authorization/" + AAD_B2C_SIGN_UP_OR_SIGN_IN + "']"))).click(); } private void manualRedirection() { wait.until(ExpectedConditions.urlMatches("^http: String currentUrl = driver.getCurrentUrl(); String newCurrentUrl = currentUrl.replaceFirst("http: driver.get(newCurrentUrl); } public void changeJobTile(String 
newValue) { String elementId = "jobTitle"; wait.until(presenceOfElementLocated(By.id(elementId))).clear(); wait.until(presenceOfElementLocated(By.id(elementId))).sendKeys(newValue); } public String getJobTitle() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[10]")) .findElement(By.xpath("th[2]")) .getText(); } public String getName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[2]")) .findElement(By.xpath("th[2]")) .getText(); } public String getUserFlowName() { return driver.findElement(By.cssSelector("tbody")) .findElement(By.xpath("tr[last()]")) .findElement(By.xpath("th[2]")) .getText(); } public String getSignInButtonText() { return wait.until(ExpectedConditions.elementToBeClickable(By.cssSelector("button[type='submit']"))).getText(); } }
resource leak. this is not closed anywhere.
public void beforeClass() throws IllegalAccessException { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(client, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; }
client = new CosmosClientBuilder()
public void beforeClass() { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(client); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; }
class SessionNotAvailableRetryTest extends TestSuiteBase { private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) 
.buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), 
Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } @Test(groups = {"simple"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType 
operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); 
@SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } }
class SessionNotAvailableRetryTest extends TestSuiteBase { private static final int TIMEOUT = 60000; private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"multi-region", "multi-master"}, timeOut = SETUP_TIMEOUT) @AfterClass(groups = {"multi-region", "multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @DataProvider(name = "operations") private Object[][] operations() { return new Object[][]{ new Object[]{OperationType.Read}, new Object[]{OperationType.Query}, new Object[]{OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = 
"preferredRegions", timeOut = TIMEOUT) public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, 
"1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region"}, dataProvider = "preferredRegions", 
timeOut = TIMEOUT) public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); 
responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region", "multi-master"}, dataProvider = "operations", timeOut = TIMEOUT) public void sessionNotAvailableRetryWithoutPreferredList(OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, 
spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(1); String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); 
int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } finally { safeClose(preferredListClient); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } private class TestItem { private String id; private String mypk; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } } }
We have a helper method for this; please use it: ReflectionUtils.getAsyncClient(client)
public void beforeClass() throws IllegalAccessException { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(client, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; }
AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(client,
public void beforeClass() { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(client); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; }
class SessionNotAvailableRetryTest extends TestSuiteBase { private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) 
.buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), 
Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } @Test(groups = {"simple"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType 
operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); 
@SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } }
class SessionNotAvailableRetryTest extends TestSuiteBase { private static final int TIMEOUT = 60000; private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"multi-region", "multi-master"}, timeOut = SETUP_TIMEOUT) @AfterClass(groups = {"multi-region", "multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @DataProvider(name = "operations") private Object[][] operations() { return new Object[][]{ new Object[]{OperationType.Read}, new Object[]{OperationType.Query}, new Object[]{OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = 
"preferredRegions", timeOut = TIMEOUT) public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, 
"1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region"}, dataProvider = "preferredRegions", 
timeOut = TIMEOUT) public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); 
responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region", "multi-master"}, dataProvider = "operations", timeOut = TIMEOUT) public void sessionNotAvailableRetryWithoutPreferredList(OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, 
spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(1); String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); 
int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } finally { safeClose(preferredListClient); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } private class TestItem { private String id; private String mypk; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } } }
done
// Builds the shared direct-mode async client and caches the account's
// DatabaseAccount snapshot, which the data providers later use to enumerate
// the readable regions.
public void beforeClass() {
    client = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .directMode()
        .buildAsyncClient();
    // Use the type-safe test helper instead of string-based FieldUtils
    // reflection; this also removes the spurious checked
    // IllegalAccessException from the method signature (a compatible
    // narrowing for callers).
    AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(client);
    RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient;
    GlobalEndpointManager globalEndpointManager =
        ReflectionUtils.getGlobalEndpointManager(rxDocumentClient);
    this.databaseAccount = globalEndpointManager.getLatestDatabaseAccount();
}
client = new CosmosClientBuilder()
// One-time setup: create the direct-mode async client and remember the
// latest DatabaseAccount so the data providers can read the region list.
public void beforeClass() {
    client = new CosmosClientBuilder()
        .endpoint(TestConfigurations.HOST)
        .key(TestConfigurations.MASTER_KEY)
        .contentResponseOnWriteEnabled(true)
        .directMode()
        .buildAsyncClient();
    RxDocumentClientImpl documentClient =
        (RxDocumentClientImpl) ReflectionUtils.getAsyncDocumentClient(client);
    GlobalEndpointManager endpointManager =
        ReflectionUtils.getGlobalEndpointManager(documentClient);
    this.databaseAccount = endpointManager.getLatestDatabaseAccount();
}
class SessionNotAvailableRetryTest extends TestSuiteBase { private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) 
.buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), 
Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } @Test(groups = {"simple"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType 
operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); 
@SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } }
class SessionNotAvailableRetryTest extends TestSuiteBase { private static final int TIMEOUT = 60000; private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"multi-region", "multi-master"}, timeOut = SETUP_TIMEOUT) @AfterClass(groups = {"multi-region", "multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @DataProvider(name = "operations") private Object[][] operations() { return new Object[][]{ new Object[]{OperationType.Read}, new Object[]{OperationType.Query}, new Object[]{OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = 
"preferredRegions", timeOut = TIMEOUT) public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, 
"1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region"}, dataProvider = "preferredRegions", 
timeOut = TIMEOUT) public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); 
responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region", "multi-master"}, dataProvider = "operations", timeOut = TIMEOUT) public void sessionNotAvailableRetryWithoutPreferredList(OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, 
spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(1); String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); 
int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } finally { safeClose(preferredListClient); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } private class TestItem { private String id; private String mypk; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } } }
done
public void beforeClass() throws IllegalAccessException { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(client, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; }
AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(client,
public void beforeClass() { client = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(client); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; GlobalEndpointManager globalEndpointManager = ReflectionUtils.getGlobalEndpointManager(rxDocumentClient); DatabaseAccount databaseAccount = globalEndpointManager.getLatestDatabaseAccount(); this.databaseAccount = databaseAccount; }
class SessionNotAvailableRetryTest extends TestSuiteBase { private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) 
.buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), 
Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } @Test(groups = {"simple"}, dataProvider = "preferredRegions") public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType 
operationType) throws IllegalAccessException { CosmosAsyncClient preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = (AsyncDocumentClient) FieldUtils.readField(preferredListClient, "asyncDocumentClient", true); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; ServerStoreModel storeModel = (ServerStoreModel) FieldUtils.readField(rxDocumentClient, "storeModel", true); StoreClient storeClient = (StoreClient) FieldUtils.readField(storeModel, "storeClient", true); ReplicatedResourceClient replicatedResourceClient = (ReplicatedResourceClient) FieldUtils.readField(storeClient, "replicatedResourceClient", true); ConsistencyReader consistencyReader = (ConsistencyReader) FieldUtils.readField(replicatedResourceClient, "consistencyReader", true); ConsistencyWriter consistencyWriter = (ConsistencyWriter) FieldUtils.readField(replicatedResourceClient, "consistencyWriter", true); StoreReader storeReader = (StoreReader) FieldUtils.readField(consistencyReader, "storeReader", true); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); FieldUtils.writeField(storeReader, "transportClient", spyRntbdTransportClient, true); FieldUtils.writeField(consistencyWriter, "transportClient", spyRntbdTransportClient, true); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); 
@SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, InternalObjectNode.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, InternalObjectNode.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { InternalObjectNode node = new InternalObjectNode(); node.setId("Test"); node.set("mypk", "Test"); cosmosAsyncContainer.createItem(node, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } }
class SessionNotAvailableRetryTest extends TestSuiteBase { private static final int TIMEOUT = 60000; private CosmosAsyncClient client; private CosmosAsyncContainer cosmosAsyncContainer; private DatabaseAccount databaseAccount; @BeforeClass(groups = {"multi-region", "multi-master"}, timeOut = SETUP_TIMEOUT) @AfterClass(groups = {"multi-region", "multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } @DataProvider(name = "preferredRegions") private Object[][] preferredRegions() { List<String> preferredLocations1 = new ArrayList<>(); List<String> regionalSuffix1 = new ArrayList<>(); List<String> preferredLocations2 = new ArrayList<>(); List<String> regionalSuffix2 = new ArrayList<>(); Iterator<DatabaseAccountLocation> locationIterator = this.databaseAccount.getReadableLocations().iterator(); while (locationIterator.hasNext()) { DatabaseAccountLocation accountLocation = locationIterator.next(); preferredLocations1.add(accountLocation.getName()); regionalSuffix1.add(getRegionalSuffix(accountLocation.getEndpoint(), TestConfigurations.HOST)); } for (int i = preferredLocations1.size() - 1; i >= 0; i--) { preferredLocations2.add(preferredLocations1.get(i)); regionalSuffix2.add(regionalSuffix1.get(i)); } return new Object[][]{ new Object[]{preferredLocations1, regionalSuffix1, OperationType.Read}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Read}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Query}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Query}, new Object[]{preferredLocations1, regionalSuffix1, OperationType.Create}, new Object[]{preferredLocations2, regionalSuffix2, OperationType.Create}, }; } @DataProvider(name = "operations") private Object[][] operations() { return new Object[][]{ new Object[]{OperationType.Read}, new Object[]{OperationType.Query}, new Object[]{OperationType.Create}, }; } @Test(groups = {"multi-master"}, dataProvider = 
"preferredRegions", timeOut = TIMEOUT) public void sessionNotAvailableRetryMultiMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, 
"1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(preferredLocations.size()); int numberOfRegionRetried = preferredLocations.size() + 2; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); for (int i = 1; i <= preferredLocations.size(); i++) { assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(i % regionalSuffix.size())); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; } assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region"}, dataProvider = "preferredRegions", 
timeOut = TIMEOUT) public void sessionNotAvailableRetrySingleMaster(List<String> preferredLocations, List<String> regionalSuffix, OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .preferredRegions(preferredLocations) .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); PartitionKey partitionKey = new PartitionKey("Test"); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); 
responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); FieldUtils.writeField(cosmosException, "responseHeaders", responseHeaders, true); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("TestId", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(new PartitionKey("Test")); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; if (operationType.equals(OperationType.Create)) { assertThat(uniqueHost.size()).isEqualTo(1); assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + 
(averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } else { if (regionalSuffix.get(0).equals(masterOrHubRegionSuffix)) { assertThat(uniqueHost.size()).isEqualTo(1); } else { assertThat(uniqueHost.size()).isEqualTo(2); } assertThat(uris.get(totalRetries / 2)).contains(regionalSuffix.get(0)); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(regionalSuffix.get(0)); } } finally { safeClose(preferredListClient); } } @Test(groups = {"multi-region", "multi-master"}, dataProvider = "operations", timeOut = TIMEOUT) public void sessionNotAvailableRetryWithoutPreferredList(OperationType operationType) { CosmosAsyncClient preferredListClient = null; try { preferredListClient = new CosmosClientBuilder() .endpoint(TestConfigurations.HOST) .key(TestConfigurations.MASTER_KEY) .contentResponseOnWriteEnabled(true) .directMode() .buildAsyncClient(); AsyncDocumentClient asyncDocumentClient = ReflectionUtils.getAsyncDocumentClient(preferredListClient); RxDocumentClientImpl rxDocumentClient = (RxDocumentClientImpl) asyncDocumentClient; StoreClient storeClient = ReflectionUtils.getStoreClient(rxDocumentClient); ReplicatedResourceClient replicatedResourceClient = ReflectionUtils.getReplicatedResourceClient(storeClient); ConsistencyReader consistencyReader = ReflectionUtils.getConsistencyReader(replicatedResourceClient); ConsistencyWriter consistencyWriter = ReflectionUtils.getConsistencyWriter(replicatedResourceClient); StoreReader storeReader = ReflectionUtils.getStoreReader(consistencyReader); RntbdTransportClientTest rntbdTransportClient = new RntbdTransportClientTest(); RntbdTransportClientTest spyRntbdTransportClient = Mockito.spy(rntbdTransportClient); ReflectionUtils.setTransportClient(storeReader, 
spyRntbdTransportClient); ReflectionUtils.setTransportClient(consistencyWriter, spyRntbdTransportClient); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(preferredListClient); List<String> uris = new ArrayList<>(); doAnswer((Answer<Mono<StoreResponse>>) invocationOnMock -> { Uri uri = invocationOnMock.getArgumentAt(0, Uri.class); uris.add(uri.getURI().getHost()); CosmosException cosmosException = BridgeInternal.createCosmosException(404); @SuppressWarnings("unchecked") Map<String, String> responseHeaders = (Map<String, String>) FieldUtils.readField(cosmosException, "responseHeaders", true); responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, "1002"); return Mono.error(cosmosException); }).when(spyRntbdTransportClient).invokeStoreAsync(Mockito.any(Uri.class), Mockito.any(RxDocumentServiceRequest.class)); try { PartitionKey partitionKey = new PartitionKey("Test"); if (operationType.equals(OperationType.Read)) { cosmosAsyncContainer.readItem("Test", partitionKey, TestItem.class).block(); } else if (operationType.equals(OperationType.Query)) { String query = "Select * from C"; CosmosQueryRequestOptions requestOptions = new CosmosQueryRequestOptions(); requestOptions.setPartitionKey(partitionKey); cosmosAsyncContainer.queryItems(query, requestOptions, TestItem.class).byPage().blockFirst(); } else if (operationType.equals(OperationType.Create)) { TestItem item = new TestItem(); item.setId("Test"); item.setMypk("Test"); cosmosAsyncContainer.createItem(item, partitionKey, new CosmosItemRequestOptions()).block(); } fail("Request should fail with 404/1002 error"); } catch (CosmosException ex) { assertThat(ex.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); } HashSet<String> uniqueHost = new HashSet<>(); for (String uri : uris) { uniqueHost.add(uri); } assertThat(uniqueHost.size()).isEqualTo(1); String masterOrHubRegionSuffix = getRegionalSuffix(databaseAccount.getWritableLocations().iterator().next().getEndpoint(), TestConfigurations.HOST); 
int numberOfRegionRetried = 3; int averageRetryBySessionRetryPolicyInOneRegion = uris.size() / numberOfRegionRetried; int totalRetries = averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries / 2)).contains(masterOrHubRegionSuffix); assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); totalRetries = totalRetries + averageRetryBySessionRetryPolicyInOneRegion; assertThat(uris.get(totalRetries + (averageRetryBySessionRetryPolicyInOneRegion) / 2)).contains(masterOrHubRegionSuffix); } finally { safeClose(preferredListClient); } } private String getRegionalSuffix(String str1, String str2) { int initialIndex = findInitialIndex(str1, str2); int indexFromLast = findIndexFromLast(str1, str2); return str1.substring(initialIndex + 1, str1.length() - indexFromLast); } private int findInitialIndex(String str1, String str2) { int counter = 0; while (str1.charAt(counter) == str2.charAt(counter)) { counter++; } return counter; } private int findIndexFromLast(String str1, String str2) { int length1 = str1.length(); int length2 = str2.length(); int counter = 0; while (str1.charAt(length1 - 1 - counter) == str2.charAt(length2 - 1 - counter)) { counter++; } return counter; } private class RntbdTransportClientTest extends TransportClient { @Override protected Mono<StoreResponse> invokeStoreAsync(Uri physicalAddress, RxDocumentServiceRequest request) { return Mono.empty(); } @Override public void close() { } } private class TestItem { private String id; private String mypk; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } } }
why is this changed?
public void validQuerySucceeds(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsClient client = getClient(httpClient, serviceVersion); int pageSize = 3; String floorModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); List<String> roomTwinIds = new ArrayList<>(); try { String roomModelPayload = TestAssetsHelper.getRoomModelPayload(roomModelId, floorModelId); client.createModelsWithResponse(new ArrayList<>(Arrays.asList(roomModelPayload)), Context.NONE); String roomTwin = TestAssetsHelper.getRoomTwinPayload(roomModelId); for (int i = 0; i < pageSize + 1; i++) { String roomTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); roomTwinIds.add(roomTwinId); client.createOrReplaceDigitalTwinWithResponse(roomTwinId, roomTwin, String.class, null, Context.NONE); } String queryString = "SELECT * FROM digitaltwins where IsOccupied = true"; PagedIterable<BasicDigitalTwin> pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class, new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); for (BasicDigitalTwin digitalTwin : pagedQueryResponse) { assertThat(digitalTwin.getContents().get("IsOccupied")) .as("IsOccupied should be true") .isEqualTo(true); } pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class,new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); int pageCount = 0; for (Page<BasicDigitalTwin> digitalTwinsPage : pagedQueryResponse.iterableByPage()) { pageCount++; int elementsPerPage = 0; for (BasicDigitalTwin basicDigitalTwin : digitalTwinsPage.getElements()) { elementsPerPage++; } if (digitalTwinsPage.getContinuationToken() != null) { assertFalse(elementsPerPage < pageSize, "Unexpected page size for a non-terminal page"); } } 
assertTrue(pageCount > 1, "Expected more than one page of query results"); } finally { try { for (String roomTwinId : roomTwinIds) { client.deleteDigitalTwin(roomTwinId); } if (roomModelId != null) { client.deleteModel(roomModelId); } } catch (Exception ex) { fail("Failed to cleanup due to: ", ex); } } }
int pageSize = 3;
public void validQuerySucceeds(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsClient client = getClient(httpClient, serviceVersion); int pageSize = 3; String floorModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); List<String> roomTwinIds = new ArrayList<>(); try { String roomModelPayload = TestAssetsHelper.getRoomModelPayload(roomModelId, floorModelId); client.createModelsWithResponse(new ArrayList<>(Arrays.asList(roomModelPayload)), Context.NONE); String roomTwin = TestAssetsHelper.getRoomTwinPayload(roomModelId); for (int i = 0; i < pageSize + 1; i++) { String roomTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); roomTwinIds.add(roomTwinId); client.createOrReplaceDigitalTwinWithResponse(roomTwinId, roomTwin, String.class, null, Context.NONE); } String queryString = "SELECT * FROM digitaltwins where IsOccupied = true"; PagedIterable<BasicDigitalTwin> pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class, new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); for (BasicDigitalTwin digitalTwin : pagedQueryResponse) { assertThat(digitalTwin.getContents().get("IsOccupied")) .as("IsOccupied should be true") .isEqualTo(true); } pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class,new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); int pageCount = 0; for (Page<BasicDigitalTwin> digitalTwinsPage : pagedQueryResponse.iterableByPage()) { pageCount++; int elementsPerPage = 0; for (BasicDigitalTwin basicDigitalTwin : digitalTwinsPage.getElements()) { elementsPerPage++; } if (digitalTwinsPage.getContinuationToken() != null) { assertFalse(elementsPerPage < pageSize, "Unexpected page size for a non-terminal page"); } } 
assertTrue(pageCount > 1, "Expected more than one page of query results"); } finally { try { for (String roomTwinId : roomTwinIds) { client.deleteDigitalTwin(roomTwinId); } if (roomModelId != null) { client.deleteModel(roomModelId); } } catch (Exception ex) { fail("Failed to cleanup due to: ", ex); } } }
class QueryTests extends QueryTestBase { private final ClientLogger logger = new ClientLogger(ComponentsTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
class QueryTests extends QueryTestBase { private final ClientLogger logger = new ClientLogger(ComponentsTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
This change doesn't really matter; I made it while fixing this test.
public void validQuerySucceeds(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsClient client = getClient(httpClient, serviceVersion); int pageSize = 3; String floorModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); List<String> roomTwinIds = new ArrayList<>(); try { String roomModelPayload = TestAssetsHelper.getRoomModelPayload(roomModelId, floorModelId); client.createModelsWithResponse(new ArrayList<>(Arrays.asList(roomModelPayload)), Context.NONE); String roomTwin = TestAssetsHelper.getRoomTwinPayload(roomModelId); for (int i = 0; i < pageSize + 1; i++) { String roomTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); roomTwinIds.add(roomTwinId); client.createOrReplaceDigitalTwinWithResponse(roomTwinId, roomTwin, String.class, null, Context.NONE); } String queryString = "SELECT * FROM digitaltwins where IsOccupied = true"; PagedIterable<BasicDigitalTwin> pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class, new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); for (BasicDigitalTwin digitalTwin : pagedQueryResponse) { assertThat(digitalTwin.getContents().get("IsOccupied")) .as("IsOccupied should be true") .isEqualTo(true); } pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class,new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); int pageCount = 0; for (Page<BasicDigitalTwin> digitalTwinsPage : pagedQueryResponse.iterableByPage()) { pageCount++; int elementsPerPage = 0; for (BasicDigitalTwin basicDigitalTwin : digitalTwinsPage.getElements()) { elementsPerPage++; } if (digitalTwinsPage.getContinuationToken() != null) { assertFalse(elementsPerPage < pageSize, "Unexpected page size for a non-terminal page"); } } 
assertTrue(pageCount > 1, "Expected more than one page of query results"); } finally { try { for (String roomTwinId : roomTwinIds) { client.deleteDigitalTwin(roomTwinId); } if (roomModelId != null) { client.deleteModel(roomModelId); } } catch (Exception ex) { fail("Failed to cleanup due to: ", ex); } } }
int pageSize = 3;
public void validQuerySucceeds(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsClient client = getClient(httpClient, serviceVersion); int pageSize = 3; String floorModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); List<String> roomTwinIds = new ArrayList<>(); try { String roomModelPayload = TestAssetsHelper.getRoomModelPayload(roomModelId, floorModelId); client.createModelsWithResponse(new ArrayList<>(Arrays.asList(roomModelPayload)), Context.NONE); String roomTwin = TestAssetsHelper.getRoomTwinPayload(roomModelId); for (int i = 0; i < pageSize + 1; i++) { String roomTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); roomTwinIds.add(roomTwinId); client.createOrReplaceDigitalTwinWithResponse(roomTwinId, roomTwin, String.class, null, Context.NONE); } String queryString = "SELECT * FROM digitaltwins where IsOccupied = true"; PagedIterable<BasicDigitalTwin> pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class, new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); for (BasicDigitalTwin digitalTwin : pagedQueryResponse) { assertThat(digitalTwin.getContents().get("IsOccupied")) .as("IsOccupied should be true") .isEqualTo(true); } pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class,new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); int pageCount = 0; for (Page<BasicDigitalTwin> digitalTwinsPage : pagedQueryResponse.iterableByPage()) { pageCount++; int elementsPerPage = 0; for (BasicDigitalTwin basicDigitalTwin : digitalTwinsPage.getElements()) { elementsPerPage++; } if (digitalTwinsPage.getContinuationToken() != null) { assertFalse(elementsPerPage < pageSize, "Unexpected page size for a non-terminal page"); } } 
assertTrue(pageCount > 1, "Expected more than one page of query results"); } finally { try { for (String roomTwinId : roomTwinIds) { client.deleteDigitalTwin(roomTwinId); } if (roomModelId != null) { client.deleteModel(roomModelId); } } catch (Exception ex) { fail("Failed to cleanup due to: ", ex); } } }
class QueryTests extends QueryTestBase { private final ClientLogger logger = new ClientLogger(ComponentsTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
class QueryTests extends QueryTestBase { private final ClientLogger logger = new ClientLogger(ComponentsTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
It just changes the max items per page from 5 to 3.
public void validQuerySucceeds(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsClient client = getClient(httpClient, serviceVersion); int pageSize = 3; String floorModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); List<String> roomTwinIds = new ArrayList<>(); try { String roomModelPayload = TestAssetsHelper.getRoomModelPayload(roomModelId, floorModelId); client.createModelsWithResponse(new ArrayList<>(Arrays.asList(roomModelPayload)), Context.NONE); String roomTwin = TestAssetsHelper.getRoomTwinPayload(roomModelId); for (int i = 0; i < pageSize + 1; i++) { String roomTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); roomTwinIds.add(roomTwinId); client.createOrReplaceDigitalTwinWithResponse(roomTwinId, roomTwin, String.class, null, Context.NONE); } String queryString = "SELECT * FROM digitaltwins where IsOccupied = true"; PagedIterable<BasicDigitalTwin> pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class, new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); for (BasicDigitalTwin digitalTwin : pagedQueryResponse) { assertThat(digitalTwin.getContents().get("IsOccupied")) .as("IsOccupied should be true") .isEqualTo(true); } pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class,new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); int pageCount = 0; for (Page<BasicDigitalTwin> digitalTwinsPage : pagedQueryResponse.iterableByPage()) { pageCount++; int elementsPerPage = 0; for (BasicDigitalTwin basicDigitalTwin : digitalTwinsPage.getElements()) { elementsPerPage++; } if (digitalTwinsPage.getContinuationToken() != null) { assertFalse(elementsPerPage < pageSize, "Unexpected page size for a non-terminal page"); } } 
assertTrue(pageCount > 1, "Expected more than one page of query results"); } finally { try { for (String roomTwinId : roomTwinIds) { client.deleteDigitalTwin(roomTwinId); } if (roomModelId != null) { client.deleteModel(roomModelId); } } catch (Exception ex) { fail("Failed to cleanup due to: ", ex); } } }
int pageSize = 3;
public void validQuerySucceeds(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) { DigitalTwinsClient client = getClient(httpClient, serviceVersion); int pageSize = 3; String floorModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.FLOOR_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); String roomModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_MODEL_ID_PREFIX, client, randomIntegerStringGenerator); List<String> roomTwinIds = new ArrayList<>(); try { String roomModelPayload = TestAssetsHelper.getRoomModelPayload(roomModelId, floorModelId); client.createModelsWithResponse(new ArrayList<>(Arrays.asList(roomModelPayload)), Context.NONE); String roomTwin = TestAssetsHelper.getRoomTwinPayload(roomModelId); for (int i = 0; i < pageSize + 1; i++) { String roomTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_TWIN_ID_PREFIX, client, randomIntegerStringGenerator); roomTwinIds.add(roomTwinId); client.createOrReplaceDigitalTwinWithResponse(roomTwinId, roomTwin, String.class, null, Context.NONE); } String queryString = "SELECT * FROM digitaltwins where IsOccupied = true"; PagedIterable<BasicDigitalTwin> pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class, new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); for (BasicDigitalTwin digitalTwin : pagedQueryResponse) { assertThat(digitalTwin.getContents().get("IsOccupied")) .as("IsOccupied should be true") .isEqualTo(true); } pagedQueryResponse = client.query(queryString, BasicDigitalTwin.class,new QueryOptions().setMaxItemsPerPage(pageSize), Context.NONE); int pageCount = 0; for (Page<BasicDigitalTwin> digitalTwinsPage : pagedQueryResponse.iterableByPage()) { pageCount++; int elementsPerPage = 0; for (BasicDigitalTwin basicDigitalTwin : digitalTwinsPage.getElements()) { elementsPerPage++; } if (digitalTwinsPage.getContinuationToken() != null) { assertFalse(elementsPerPage < pageSize, "Unexpected page size for a non-terminal page"); } } 
assertTrue(pageCount > 1, "Expected more than one page of query results"); } finally { try { for (String roomTwinId : roomTwinIds) { client.deleteDigitalTwin(roomTwinId); } if (roomModelId != null) { client.deleteModel(roomModelId); } } catch (Exception ex) { fail("Failed to cleanup due to: ", ex); } } }
class QueryTests extends QueryTestBase { private final ClientLogger logger = new ClientLogger(ComponentsTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
class QueryTests extends QueryTestBase { private final ClientLogger logger = new ClientLogger(ComponentsTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
nit: this check is kind of redundant — this code would throw on the line above if the value is negative. Is there any validation in the callers of this method that checks length anyway? Or maybe we should just throw an assertion at the beginning of this method — I think we should throw on negative length somewhere.
protected void writeInternal(final byte[] data, int offset, int length) { this.checkStreamState(); /* We need to do a deep copy here because the writing is async in this case. It is a common pattern for customers writing to an output stream to perform the writes in a tight loop with a reused buffer. This coupled with async network behavior can result in the data being overwritten as the buffer is reused. */ byte[] buffer = new byte[length]; if (length >= 0) System.arraycopy(data, offset, buffer, 0, length); sink.next(ByteBuffer.wrap(buffer)); }
if (length >= 0) System.arraycopy(data, offset, buffer, 0, length);
protected void writeInternal(final byte[] data, int offset, int length) { this.checkStreamState(); /* We need to do a deep copy here because the writing is async in this case. It is a common pattern for customers writing to an output stream to perform the writes in a tight loop with a reused buffer. This coupled with async network behavior can result in the data being overwritten as the buffer is reused. */ byte[] buffer = new byte[length]; System.arraycopy(data, offset, buffer, 0, length); sink.next(ByteBuffer.wrap(buffer)); }
class BlockBlobOutputStream extends BlobOutputStream { private FluxSink<ByteBuffer> sink; private final Lock lock; private final Condition transferComplete; boolean complete; private BlockBlobOutputStream(final BlobAsyncClient client, final ParallelTransferOptions parallelTransferOptions, final BlobHttpHeaders headers, final Map<String, String> metadata, Map<String, String> tags, final AccessTier tier, final BlobRequestConditions requestConditions, Context context) { super(Integer.MAX_VALUE); context = context == null || context.equals(Context.NONE) ? null : context; this.lock = new ReentrantLock(); this.transferComplete = lock.newCondition(); Flux<ByteBuffer> fbb = Flux.create((FluxSink<ByteBuffer> sink) -> this.sink = sink); /* Subscribe by upload takes too long. We need to subscribe so that the sink is actually created. Since this subscriber doesn't do anything and no data has started flowing, there are no drawbacks to this extra subscribe. */ fbb.subscribe(); client.uploadWithResponse(new BlobParallelUploadOptions(fbb). setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTags(tags).setTier(tier).setRequestConditions(requestConditions)) .onErrorResume(e -> { if (e instanceof IOException) { this.lastError = (IOException) e; } else { this.lastError = new IOException(e); } return Mono.empty(); }) .doOnTerminate(() -> { lock.lock(); try { complete = true; transferComplete.signal(); } finally { lock.unlock(); } }) .subscriberContext(FluxUtil.toReactorContext(context)) .subscribe(); } @Override void commit() { lock.lock(); try { sink.complete(); /* Allow upload task to try to complete. */ while (!complete) { transferComplete.await(); } } catch (InterruptedException e) { this.lastError = new IOException(e.getMessage()); } finally { lock.unlock(); } } @Override @Override protected Mono<Void> dispatchWrite(byte[] data, int writeLength, long offset) { return Mono.empty(); } }
class BlockBlobOutputStream extends BlobOutputStream { private FluxSink<ByteBuffer> sink; private final Lock lock; private final Condition transferComplete; boolean complete; private BlockBlobOutputStream(final BlobAsyncClient client, final ParallelTransferOptions parallelTransferOptions, final BlobHttpHeaders headers, final Map<String, String> metadata, Map<String, String> tags, final AccessTier tier, final BlobRequestConditions requestConditions, Context context) { super(Integer.MAX_VALUE); context = context == null || context.equals(Context.NONE) ? null : context; this.lock = new ReentrantLock(); this.transferComplete = lock.newCondition(); Flux<ByteBuffer> fbb = Flux.create((FluxSink<ByteBuffer> sink) -> this.sink = sink); /* Subscribe by upload takes too long. We need to subscribe so that the sink is actually created. Since this subscriber doesn't do anything and no data has started flowing, there are no drawbacks to this extra subscribe. */ fbb.subscribe(); client.uploadWithResponse(new BlobParallelUploadOptions(fbb). setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTags(tags).setTier(tier).setRequestConditions(requestConditions)) .onErrorResume(e -> { if (e instanceof IOException) { this.lastError = (IOException) e; } else { this.lastError = new IOException(e); } return Mono.empty(); }) .doOnTerminate(() -> { lock.lock(); try { complete = true; transferComplete.signal(); } finally { lock.unlock(); } }) .subscriberContext(FluxUtil.toReactorContext(context)) .subscribe(); } @Override void commit() { lock.lock(); try { sink.complete(); /* Allow upload task to try to complete. */ while (!complete) { transferComplete.await(); } } catch (InterruptedException e) { this.lastError = new IOException(e.getMessage()); } finally { lock.unlock(); } } @Override @Override protected Mono<Void> dispatchWrite(byte[] data, int writeLength, long offset) { return Mono.empty(); } }
Is this change OK for cases where the customer interacts with the async clients and uses APIs that take Flux&lt;ByteBuffer&gt; instead of streams?
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
size += buf.remaining();
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
The caller does check this, so I'll remove it here.
protected void writeInternal(final byte[] data, int offset, int length) { this.checkStreamState(); /* We need to do a deep copy here because the writing is async in this case. It is a common pattern for customers writing to an output stream to perform the writes in a tight loop with a reused buffer. This coupled with async network behavior can result in the data being overwritten as the buffer is reused. */ byte[] buffer = new byte[length]; if (length >= 0) System.arraycopy(data, offset, buffer, 0, length); sink.next(ByteBuffer.wrap(buffer)); }
if (length >= 0) System.arraycopy(data, offset, buffer, 0, length);
protected void writeInternal(final byte[] data, int offset, int length) { this.checkStreamState(); /* We need to do a deep copy here because the writing is async in this case. It is a common pattern for customers writing to an output stream to perform the writes in a tight loop with a reused buffer. This coupled with async network behavior can result in the data being overwritten as the buffer is reused. */ byte[] buffer = new byte[length]; System.arraycopy(data, offset, buffer, 0, length); sink.next(ByteBuffer.wrap(buffer)); }
class BlockBlobOutputStream extends BlobOutputStream { private FluxSink<ByteBuffer> sink; private final Lock lock; private final Condition transferComplete; boolean complete; private BlockBlobOutputStream(final BlobAsyncClient client, final ParallelTransferOptions parallelTransferOptions, final BlobHttpHeaders headers, final Map<String, String> metadata, Map<String, String> tags, final AccessTier tier, final BlobRequestConditions requestConditions, Context context) { super(Integer.MAX_VALUE); context = context == null || context.equals(Context.NONE) ? null : context; this.lock = new ReentrantLock(); this.transferComplete = lock.newCondition(); Flux<ByteBuffer> fbb = Flux.create((FluxSink<ByteBuffer> sink) -> this.sink = sink); /* Subscribe by upload takes too long. We need to subscribe so that the sink is actually created. Since this subscriber doesn't do anything and no data has started flowing, there are no drawbacks to this extra subscribe. */ fbb.subscribe(); client.uploadWithResponse(new BlobParallelUploadOptions(fbb). setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTags(tags).setTier(tier).setRequestConditions(requestConditions)) .onErrorResume(e -> { if (e instanceof IOException) { this.lastError = (IOException) e; } else { this.lastError = new IOException(e); } return Mono.empty(); }) .doOnTerminate(() -> { lock.lock(); try { complete = true; transferComplete.signal(); } finally { lock.unlock(); } }) .subscriberContext(FluxUtil.toReactorContext(context)) .subscribe(); } @Override void commit() { lock.lock(); try { sink.complete(); /* Allow upload task to try to complete. */ while (!complete) { transferComplete.await(); } } catch (InterruptedException e) { this.lastError = new IOException(e.getMessage()); } finally { lock.unlock(); } } @Override @Override protected Mono<Void> dispatchWrite(byte[] data, int writeLength, long offset) { return Mono.empty(); } }
class BlockBlobOutputStream extends BlobOutputStream { private FluxSink<ByteBuffer> sink; private final Lock lock; private final Condition transferComplete; boolean complete; private BlockBlobOutputStream(final BlobAsyncClient client, final ParallelTransferOptions parallelTransferOptions, final BlobHttpHeaders headers, final Map<String, String> metadata, Map<String, String> tags, final AccessTier tier, final BlobRequestConditions requestConditions, Context context) { super(Integer.MAX_VALUE); context = context == null || context.equals(Context.NONE) ? null : context; this.lock = new ReentrantLock(); this.transferComplete = lock.newCondition(); Flux<ByteBuffer> fbb = Flux.create((FluxSink<ByteBuffer> sink) -> this.sink = sink); /* Subscribe by upload takes too long. We need to subscribe so that the sink is actually created. Since this subscriber doesn't do anything and no data has started flowing, there are no drawbacks to this extra subscribe. */ fbb.subscribe(); client.uploadWithResponse(new BlobParallelUploadOptions(fbb). setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTags(tags).setTier(tier).setRequestConditions(requestConditions)) .onErrorResume(e -> { if (e instanceof IOException) { this.lastError = (IOException) e; } else { this.lastError = new IOException(e); } return Mono.empty(); }) .doOnTerminate(() -> { lock.lock(); try { complete = true; transferComplete.signal(); } finally { lock.unlock(); } }) .subscriberContext(FluxUtil.toReactorContext(context)) .subscribe(); } @Override void commit() { lock.lock(); try { sink.complete(); /* Allow upload task to try to complete. */ while (!complete) { transferComplete.await(); } } catch (InterruptedException e) { this.lastError = new IOException(e.getMessage()); } finally { lock.unlock(); } } @Override @Override protected Mono<Void> dispatchWrite(byte[] data, int writeLength, long offset) { return Mono.empty(); } }
I think it should be. We only added this because of issues we were seeing in the sync case that came from an internal method reusing buffers to copy to an output stream. I don't think reusing buffers like that is a pattern in the async case because there aren't streams to copy between
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
size += buf.remaining();
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
There are a few tests that use `TransientFailureInjectingHttpPipelinePolicy` with a `Flux` (e.g. `Buffered upload handle pathing hot flux with transient failure`). However, that test is live-only and doesn't seem to cover larger payloads (i.e. it doesn't seem to cross the single-upload threshold). Could you add some coverage for this so we can be 100% sure?
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
size += buf.remaining();
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
Well, I just noticed that this lowers the single-upload threshold. Have you run it? (Asking because CI won't catch it.)
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
size += buf.remaining();
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
I ran the live test pipeline and it seems to be passing. Only the analyze step looks to be failing at the moment.
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
size += buf.remaining();
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
Should we add a changelog entry for common, even though all of these changes are "internal only" changes?
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
if (isThresholdBreached()) {
Flux<ByteBuffer> write(ByteBuffer buf) { if (isThresholdBreached()) { size += buf.remaining(); return Flux.just(buf); } else { size += buf.remaining(); byteBuffers.add(buf); if (isThresholdBreached()) { Flux<ByteBuffer> result = dequeuingFlux(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
class PayloadSizeGate { private final long threshold; private long size = 0; private Queue<ByteBuffer> byteBuffers = new LinkedList<>(); /** * Creates a new instance of PayloadSizeGate * @param threshold Number of bytes up to which data is buffered. */ PayloadSizeGate(long threshold) { this.threshold = threshold; } /** * Keeps buffering buffers until threshold is breached. * Then it acts as pass-through. * @param buf Incoming data. * @return Buffered data or incoming data depending on threshold condition. */ /** * Flushes the gate. If threshold has not been broken then invoking this method pushes any lingering data forward. * @return Buffered data if threshold has not been broken. Otherwise empty. */ Flux<ByteBuffer> flush() { if (byteBuffers != null) { Flux<ByteBuffer> result = Flux.fromIterable(byteBuffers); byteBuffers = null; return result; } else { return Flux.empty(); } } /** * @return Size of data observed by the gate. */ long size() { return size; } /** * @return A flag indicating if observed data has breached the threshold. */ boolean isThresholdBreached() { return size > threshold; } private static Flux<ByteBuffer> dequeuingFlux(Queue<ByteBuffer> queue) { return Flux.generate(sink -> { ByteBuffer buffer = queue.poll(); if (buffer != null) { sink.next(buffer); } else { sink.complete(); } }); } }
@alzimmermsft Do we want to keep this around for the same reason we did in the SAS credential policy PR — in case someone upgrades common but not blob?
/**
 * Logs the string to sign if a valid context is provided.
 *
 * @param logger {@link ClientLogger} used to emit the log lines.
 * @param stringToSign The string to sign to log.
 * @param context Additional context to determine if the string to sign should be logged.
 */
public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) {
    // Opt-in only: without a context there is no way the caller requested logging.
    if (context == null) {
        return;
    }
    // The flag must be explicitly Boolean.TRUE; any other value (or absence) means "off".
    Object logFlag = context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false);
    if (!Boolean.TRUE.equals(logFlag)) {
        return;
    }
    logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator());
    // Warn every time: the string to sign can contain PII and must not be logged in production.
    logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN);
}
logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN);
public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) { if (context != null && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) { logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator()); logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN); } }
/**
 * Internal utility methods shared by the Storage SDK: query-string parsing, optional timeouts for
 * reactive types, argument validation, HMAC-SHA256 signing, URL manipulation, and diagnostics for
 * signature-mismatch errors.
 */
class StorageImplUtils {
    private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class);

    private static final String ARGUMENT_NULL_OR_EMPTY =
        "The argument must not be null or an empty string. Argument name: %s.";

    private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s.";

    private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments.";

    private static final String STRING_TO_SIGN_LOG_INFO_MESSAGE = "The string to sign computed by the SDK is: {}{}";

    private static final String STRING_TO_SIGN_LOG_WARNING_MESSAGE = "Please remember to disable '{}' before going "
        + "to production as this string can potentially contain PII.";

    private static final String STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE = String.format(
        "If you are using a StorageSharedKeyCredential, and the server returned an "
            + "error message that says 'Signature did not match', you can compare the string to sign with"
            + " the one generated by the SDK. To log the string to sign, pass in the context key value pair "
            + "'%s': true to the appropriate method call.%n"
            + "If you are using a SAS token, and the server returned an error message that says "
            + "'Signature did not match', you can compare the string to sign with"
            + " the one generated by the SDK. To log the string to sign, pass in the context key value "
            + "pair '%s': true to the appropriate generateSas method call.%n"
            + "Please remember to disable '%s' before going to production as this string can potentially "
            + "contain PII.%n",
        Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN,
        Constants.STORAGE_LOG_STRING_TO_SIGN);

    /**
     * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
     * stored as a string (ex. key=val1,val2,val3 instead of key=[val1, val2, val3]).
     *
     * @param queryString Query string to parse
     * @return a mapping of query string pieces as key-value pairs.
     */
    public static Map<String, String> parseQueryString(final String queryString) {
        return parseQueryStringHelper(queryString, Utility::urlDecode);
    }

    /**
     * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is
     * stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3).
     *
     * @param queryString Query string to parse
     * @return a mapping of query string pieces as key-value pairs.
     */
    public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) {
        return parseQueryStringHelper(queryString, value -> {
            String[] v = value.split(",");
            String[] ret = new String[v.length];
            for (int i = 0; i < v.length; i++) {
                ret[i] = urlDecode(v[i]);
            }
            return ret;
        });
    }

    // Shared parsing core: splits on '&'/'=', lower-cases and URL-decodes keys, delegates
    // value handling to valueParser. First occurrence of a key wins (putIfAbsent).
    private static <T> Map<String, T> parseQueryStringHelper(final String queryString,
        Function<String, T> valueParser) {
        TreeMap<String, T> pieces = new TreeMap<>();
        if (CoreUtils.isNullOrEmpty(queryString)) {
            return pieces;
        }
        for (String kvp : queryString.split("&")) {
            int equalIndex = kvp.indexOf("=");
            String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT));
            T value = valueParser.apply(kvp.substring(equalIndex + 1));
            pieces.putIfAbsent(key, value);
        }
        return pieces;
    }

    /**
     * Blocks an asynchronous response with an optional timeout.
     *
     * @param response Asynchronous response to block
     * @param timeout Optional timeout
     * @param <T> Return type of the asynchronous response
     * @return the value of the asynchronous response
     * @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires.
     */
    public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) {
        if (timeout == null) {
            return response.block();
        } else {
            return response.block(timeout);
        }
    }

    /**
     * Applies a timeout to a publisher if the given timeout is not null.
     *
     * @param publisher Mono to apply optional timeout to.
     * @param timeout Optional timeout.
     * @param <T> Return type of the Mono.
     * @return Mono with an applied timeout, if any.
     */
    public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) {
        return timeout == null ? publisher : publisher.timeout(timeout);
    }

    /**
     * Applies a timeout to a publisher if the given timeout is not null.
     *
     * @param publisher Flux to apply optional timeout to.
     * @param timeout Optional timeout.
     * @param <T> Return type of the Flux.
     * @return Flux with an applied timeout, if any.
     */
    public static <T> Flux<T> applyOptionalTimeout(Flux<T> publisher, Duration timeout) {
        return timeout == null ? publisher : publisher.timeout(timeout);
    }

    /**
     * Asserts that a value is not {@code null}.
     *
     * @param param Name of the parameter
     * @param value Value of the parameter
     * @throws NullPointerException If {@code value} is {@code null}
     */
    public static void assertNotNull(final String param, final Object value) {
        if (value == null) {
            throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param));
        }
    }

    /**
     * Asserts that the specified number is in the valid range. The range is inclusive.
     *
     * @param param Name of the parameter
     * @param value Value of the parameter
     * @param min The minimum allowed value
     * @param max The maximum allowed value
     * @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than
     * {@code max}.
     */
    public static void assertInBounds(final String param, final long value, final long min, final long max) {
        if (value < min || value > max) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT,
                PARAMETER_NOT_IN_RANGE, param, min, max)));
        }
    }

    /**
     * Computes a signature for the specified string using the HMAC-SHA256 algorithm.
     *
     * @param base64Key Base64 encoded key used to sign the string
     * @param stringToSign UTF-8 encoded string to sign
     * @return the HMAC-SHA256 encoded signature
     * @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded
     * string, or the UTF-8 charset isn't supported.
     */
    public static String computeHMac256(final String base64Key, final String stringToSign) {
        try {
            byte[] key = Base64.getDecoder().decode(base64Key);
            Mac hmacSHA256 = Mac.getInstance("HmacSHA256");
            hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256"));
            byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
            return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes));
        } catch (NoSuchAlgorithmException | InvalidKeyException ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * Appends a string to the end of the passed URL's path.
     *
     * @param baseURL URL having a path appended
     * @param name Name of the path
     * @return a URL with the path appended.
     * @throws IllegalArgumentException If {@code name} causes the URL to become malformed.
     */
    public static URL appendToUrlPath(String baseURL, String name) {
        UrlBuilder builder = UrlBuilder.parse(baseURL);

        if (builder.getPath() == null) {
            builder.setPath("/");
        } else if (!builder.getPath().endsWith("/")) {
            builder.setPath(builder.getPath() + "/");
        }
        builder.setPath(builder.getPath() + name);

        try {
            return builder.toUrl();
        } catch (MalformedURLException ex) {
            throw new IllegalArgumentException(ex);
        }
    }

    /**
     * Strips the last path segment from the passed URL.
     *
     * @param baseUrl URL having its last path segment stripped
     * @return a URL with the path segment stripped.
     * @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it
     * doesn't contain any path segments.
     */
    public static URL stripLastPathSegment(URL baseUrl) {
        UrlBuilder builder = UrlBuilder.parse(baseUrl);

        if (builder.getPath() == null || !builder.getPath().contains("/")) {
            throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl));
        }

        builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/")));
        try {
            return builder.toUrl();
        } catch (MalformedURLException ex) {
            throw new IllegalArgumentException(ex);
        }
    }

    /**
     * Strips the account name from host part of the URL object.
     *
     * @param url URL having its hostanme
     * @return account name.
     */
    public static String getAccountName(URL url) {
        UrlBuilder builder = UrlBuilder.parse(url);
        String accountName = null;
        String host = builder.getHost();
        if (!CoreUtils.isNullOrEmpty(host)) {
            int accountNameIndex = host.indexOf('.');
            if (accountNameIndex == -1) {
                // host with no '.' is taken to be the account name itself.
                accountName = host;
            } else {
                accountName = host.substring(0, accountNameIndex);
            }
        }
        return accountName;
    }

    /**
     * Returns an empty string if value is {@code null}, otherwise returns value.
     *
     * @param value The value to check and return.
     * @return The value or empty string.
     */
    public static String emptyIfNull(String value) {
        return value == null ? "" : value;
    }

    /**
     * Logs the string to sign if a valid context is provided.
     *
     * @param logger {@link ClientLogger}
     * @param stringToSign The string to sign to log.
     * @param context Additional context to determine if the string to sign should be logged.
     */
    public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) {
        // FIX: the Javadoc above was present but the method body itself was missing
        // from the class, leaving the class without the documented member.
        if (context != null
            && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) {
            logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator());
            logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN);
        }
    }

    /**
     * Converts the storage exception message.
     *
     * @param message The storage exception message
     * @param response The storage service response.
     * @return The converted storage exception message.
     */
    public static String convertStorageExceptionMessage(String message, HttpResponse response) {
        if (response != null) {
            if (response.getStatusCode() == 403) {
                // 403 usually means a signature mismatch — prepend guidance on logging the string to sign.
                return STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE + message;
            }
            if (response.getRequest() != null
                && response.getRequest().getHttpMethod() != null
                && response.getRequest().getHttpMethod().equals(HttpMethod.HEAD)
                && response.getHeaders().getValue(ERROR_CODE) != null) {
                // HEAD responses carry no body, so substitute the error-code header into the message.
                return message.replaceFirst("(empty body)", response.getHeaders().getValue(ERROR_CODE));
            }
        }
        return message;
    }
}
class StorageImplUtils { private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class); private static final String ARGUMENT_NULL_OR_EMPTY = "The argument must not be null or an empty string. Argument name: %s."; private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s."; private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments."; private static final String STRING_TO_SIGN_LOG_INFO_MESSAGE = "The string to sign computed by the SDK is: {}{}"; private static final String STRING_TO_SIGN_LOG_WARNING_MESSAGE = "Please remember to disable '{}' before going " + "to production as this string can potentially contain PII."; private static final String STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE = String.format( "If you are using a StorageSharedKeyCredential, and the server returned an " + "error message that says 'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value pair " + "'%s': true to the appropriate method call.%n" + "If you are using a SAS token, and the server returned an error message that says " + "'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value " + "pair '%s': true to the appropriate generateSas method call.%n" + "Please remember to disable '%s' before going to production as this string can potentially " + "contain PII.%n", Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN); /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a string (ex. key=val1,val2,val3 instead of key=[val1, val2, val3]). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. 
*/ public static Map<String, String> parseQueryString(final String queryString) { return parseQueryStringHelper(queryString, Utility::urlDecode); } /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. */ public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) { return parseQueryStringHelper(queryString, value -> { String[] v = value.split(","); String[] ret = new String[v.length]; for (int i = 0; i < v.length; i++) { ret[i] = urlDecode(v[i]); } return ret; }); } private static <T> Map<String, T> parseQueryStringHelper(final String queryString, Function<String, T> valueParser) { TreeMap<String, T> pieces = new TreeMap<>(); if (CoreUtils.isNullOrEmpty(queryString)) { return pieces; } for (String kvp : queryString.split("&")) { int equalIndex = kvp.indexOf("="); String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT)); T value = valueParser.apply(kvp.substring(equalIndex + 1)); pieces.putIfAbsent(key, value); } return pieces; } /** * Blocks an asynchronous response with an optional timeout. * * @param response Asynchronous response to block * @param timeout Optional timeout * @param <T> Return type of the asynchronous response * @return the value of the asynchronous response * @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires. */ public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) { if (timeout == null) { return response.block(); } else { return response.block(timeout); } } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Mono to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Mono. 
* @return Mono with an applied timeout, if any. */ public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Flux to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Flux. * @return Flux with an applied timeout, if any. */ public static <T> Flux<T> applyOptionalTimeout(Flux<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Asserts that a value is not {@code null}. * * @param param Name of the parameter * @param value Value of the parameter * @throws NullPointerException If {@code value} is {@code null} */ public static void assertNotNull(final String param, final Object value) { if (value == null) { throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param)); } } /** * Asserts that the specified number is in the valid range. The range is inclusive. * * @param param Name of the parameter * @param value Value of the parameter * @param min The minimum allowed value * @param max The maximum allowed value * @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than * {@code max}. */ public static void assertInBounds(final String param, final long value, final long min, final long max) { if (value < min || value > max) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT, PARAMETER_NOT_IN_RANGE, param, min, max))); } } /** * Computes a signature for the specified string using the HMAC-SHA256 algorithm. 
* * @param base64Key Base64 encoded key used to sign the string * @param stringToSign UTF-8 encoded string to sign * @return the HMAC-SHA256 encoded signature * @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded * string, or the UTF-8 charset isn't supported. */ public static String computeHMac256(final String base64Key, final String stringToSign) { try { byte[] key = Base64.getDecoder().decode(base64Key); Mac hmacSHA256 = Mac.getInstance("HmacSHA256"); hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256")); byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8); return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes)); } catch (NoSuchAlgorithmException | InvalidKeyException ex) { throw new RuntimeException(ex); } } /** * Appends a string to the end of the passed URL's path. * * @param baseURL URL having a path appended * @param name Name of the path * @return a URL with the path appended. * @throws IllegalArgumentException If {@code name} causes the URL to become malformed. */ public static URL appendToUrlPath(String baseURL, String name) { UrlBuilder builder = UrlBuilder.parse(baseURL); if (builder.getPath() == null) { builder.setPath("/"); } else if (!builder.getPath().endsWith("/")) { builder.setPath(builder.getPath() + "/"); } builder.setPath(builder.getPath() + name); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the last path segment from the passed URL. * * @param baseUrl URL having its last path segment stripped * @return a URL with the path segment stripped. * @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it * doesn't contain any path segments. 
*/ public static URL stripLastPathSegment(URL baseUrl) { UrlBuilder builder = UrlBuilder.parse(baseUrl); if (builder.getPath() == null || !builder.getPath().contains("/")) { throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl)); } builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/"))); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the account name from host part of the URL object. * * @param url URL having its hostanme * @return account name. */ public static String getAccountName(URL url) { UrlBuilder builder = UrlBuilder.parse(url); String accountName = null; String host = builder.getHost(); if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } return accountName; } /** Returns an empty string if value is {@code null}, otherwise returns value * @param value The value to check and return. * @return The value or empty string. */ public static String emptyIfNull(String value) { return value == null ? "" : value; } /** * Logs the string to sign if a valid context is provided. * * @param logger {@link ClientLogger} * @param stringToSign The string to sign to log. * @param context Additional context to determine if the string to sign should be logged. */ /** * Converts the storage exception message. * * @param message The storage exception message * @param response The storage service response. * @return The converted storage exception message. 
*/ public static String convertStorageExceptionMessage(String message, HttpResponse response) { if (response != null) { if (response.getStatusCode() == 403) { return STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE + message; } if (response.getRequest() != null && response.getRequest().getHttpMethod() != null && response.getRequest().getHttpMethod().equals(HttpMethod.HEAD) && response.getHeaders().getValue(ERROR_CODE) != null) { return message.replaceFirst("(empty body)", response.getHeaders().getValue(ERROR_CODE)); } } return message; } }
It would be easier to use `? :` (or just another variable as a constant) for a constant value, instead of an `AtomicBoolean`.
/**
 * Computes changelog entries for the wrapped class: a whole-class "added"/"removed" line for
 * NEW/REMOVED classes, otherwise a per-method changelog for every tracked method.
 */
private void calcChangeLogForClass() {
    switch (getJApiClass().getChangeStatus()) {
        case NEW:
            newFeature.add(String.format("* `%s` was added", getJApiClass().getFullyQualifiedName()));
            break;
        case REMOVED:
            breakingChange.add(String.format("* `%s` was removed", getJApiClass().getFullyQualifiedName()));
            break;
        default:
            // FIX: an AtomicBoolean is unnecessary here — the flag is computed once before the
            // lambda and only read inside it, so an effectively-final local boolean suffices.
            // NOTE(review): presumably methods of a "Definition" class are exempt from
            // return-type checks — confirm against the changelog generator's rules.
            final boolean checkReturnType = !"Definition".equals(ClassName.name(getJApiClass()));
            allMethods.getMethods().forEach(method -> this.calcChangelogForMethod(method, checkReturnType));
            break;
    }
}
}
/**
 * Computes changelog entries for the wrapped class: a whole-class "added"/"removed" line for
 * NEW/REMOVED classes, otherwise a per-method changelog for every tracked method.
 */
private void calcChangeLogForClass() {
    switch (getJApiClass().getChangeStatus()) {
        case NEW:
            newFeature.add(String.format("* `%s` was added", getJApiClass().getFullyQualifiedName()));
            break;
        case REMOVED:
            breakingChange.add(String.format("* `%s` was removed", getJApiClass().getFullyQualifiedName()));
            break;
        default:
            // NOTE(review): presumably methods of a "Definition" class are exempt from
            // return-type checks — confirm against the changelog generator's rules.
            boolean checkReturnType = !ClassName.name(getJApiClass()).equals("Definition");
            allMethods.getMethods().forEach(method -> this.calcChangelogForMethod(method, checkReturnType));
            break;
    }
}
class ChangeLog { private AllMethods allMethods; protected List<String> newFeature; protected List<String> breakingChange; ChangeLog() { this.newFeature = new ArrayList<>(); this.breakingChange = new ArrayList<>(); } ChangeLog(AllMethods allMethods) { this.allMethods = allMethods; this.newFeature = new ArrayList<>(); this.breakingChange = new ArrayList<>(); calcChangeLog(); } public static List<ChangeLog> fromClasses(List<JApiClass> classes) { Map<String, JApiClass> classMap = classes.stream().collect(Collectors.toMap(JApiClass::getFullyQualifiedName, x -> x)); Map<String, AllMethods> allMethods = new HashMap<>(); AllMethods.fromClasses(classMap, allMethods); Map<String, Map<String, AllMethods>> stages = new HashMap<>(); List<ChangeLog> changeLogForNonStage = allMethods.entrySet().stream().map(entry -> { String namespace = ClassName.namespace(entry.getKey()); String parentClass = ClassName.parentName(entry.getKey()); String parentName = ClassName.name(parentClass); if (parentName.equals("DefinitionStages")) { stages.computeIfAbsent(namespace + "." 
+ parentClass, key -> new HashMap<>()).put(entry.getKey(), entry.getValue()); } else if (!parentName.equals("UpdateStages")) { return new ChangeLog(entry.getValue()); } return null; }).filter(Objects::nonNull).collect(Collectors.toList()); Stream<ChangeLog> changeLogForStage = stages.entrySet().stream().map(entry -> new DefinitionStageChangeLog(entry.getValue(), entry.getKey())); return Stream.concat(changeLogForStage, changeLogForNonStage.stream()).collect(Collectors.toList()); } public JApiClass getJApiClass() { return this.allMethods.getJApiClass(); } public String getNewFeature() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < this.newFeature.size(); ++i) { builder.append(this.newFeature.get(i)).append("\n"); if (i + 1 == this.newFeature.size()) { builder.append("\n"); } } return builder.toString(); } public String getBreakingChange() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < this.breakingChange.size(); ++i) { builder.append(this.breakingChange.get(i)).append("\n"); if (i + 1 == this.breakingChange.size()) { builder.append("\n"); } } return builder.toString(); } public boolean isClassLevelChanged() { return getJApiClass().getChangeStatus() == JApiChangeStatus.NEW || getJApiClass().getChangeStatus() == JApiChangeStatus.REMOVED; } protected void calcChangeLog() { calcChangeLogForClass(); } protected void addClassTitle(List<String> list) { if (list.isEmpty()) { list.add(String.format(" list.add(""); } } private void calcChangelogForMethod(JApiMethod method, boolean checkReturnType) { switch (method.getChangeStatus()) { case NEW: addClassTitle(newFeature); newFeature.add(String.format("* `%s` was added", MethodName.name(method.getNewMethod().get()))); break; case REMOVED: addClassTitle(breakingChange); breakingChange.add(String.format("* `%s` was removed", MethodName.name(method.getOldMethod().get()))); break; case MODIFIED: if (!checkReturnType){ if 
(!method.getOldMethod().get().getLongName().equals(method.getNewMethod().get().getLongName())) { addClassTitle(breakingChange); breakingChange.add(String.format("* `%s` -> `%s`", MethodName.name(method.getOldMethod().get()), MethodName.name(method.getNewMethod().get()))); } } else { addClassTitle(breakingChange); breakingChange.add(String.format("* `%s %s` -> `%s %s`", method.getReturnType().getOldReturnType(), MethodName.name(method.getOldMethod().get()), method.getReturnType().getNewReturnType(), MethodName.name(method.getNewMethod().get()))); } break; } } }
class ChangeLog { private AllMethods allMethods; protected List<String> newFeature; protected List<String> breakingChange; ChangeLog() { this.newFeature = new ArrayList<>(); this.breakingChange = new ArrayList<>(); } ChangeLog(AllMethods allMethods) { this.allMethods = allMethods; this.newFeature = new ArrayList<>(); this.breakingChange = new ArrayList<>(); calcChangeLog(); } public static List<ChangeLog> fromClasses(List<JApiClass> classes) { Map<String, JApiClass> classMap = classes.stream().collect(Collectors.toMap(JApiClass::getFullyQualifiedName, x -> x)); Map<String, AllMethods> allMethods = new HashMap<>(); AllMethods.fromClasses(classMap, allMethods); Map<String, Map<String, AllMethods>> stages = new HashMap<>(); List<ChangeLog> changeLogForNonStage = allMethods.entrySet().stream().map(entry -> { String namespace = ClassName.namespace(entry.getKey()); String parentClass = ClassName.parentName(entry.getKey()); String parentName = ClassName.name(parentClass); if (parentName.equals("DefinitionStages")) { stages.computeIfAbsent(namespace + "." 
+ parentClass, key -> new HashMap<>()).put(entry.getKey(), entry.getValue()); } else if (!parentName.equals("UpdateStages")) { return new ChangeLog(entry.getValue()); } return null; }).filter(Objects::nonNull).collect(Collectors.toList()); Stream<ChangeLog> changeLogForStage = stages.entrySet().stream().map(entry -> new DefinitionStageChangeLog(entry.getValue(), entry.getKey())); return Stream.concat(changeLogForStage, changeLogForNonStage.stream()).collect(Collectors.toList()); } public JApiClass getJApiClass() { return this.allMethods.getJApiClass(); } public String getNewFeature() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < this.newFeature.size(); ++i) { builder.append(this.newFeature.get(i)).append("\n"); if (i + 1 == this.newFeature.size()) { builder.append("\n"); } } return builder.toString(); } public String getBreakingChange() { StringBuilder builder = new StringBuilder(); for (int i = 0; i < this.breakingChange.size(); ++i) { builder.append(this.breakingChange.get(i)).append("\n"); if (i + 1 == this.breakingChange.size()) { builder.append("\n"); } } return builder.toString(); } public boolean isClassLevelChanged() { return getJApiClass().getChangeStatus() == JApiChangeStatus.NEW || getJApiClass().getChangeStatus() == JApiChangeStatus.REMOVED; } protected void calcChangeLog() { calcChangeLogForClass(); } protected void addClassTitle(List<String> list) { if (list.isEmpty()) { list.add(String.format(" list.add(""); } } private void calcChangelogForMethod(JApiMethod method, boolean checkReturnType) { switch (method.getChangeStatus()) { case NEW: addClassTitle(newFeature); newFeature.add(String.format("* `%s` was added", MethodName.name(method.getNewMethod().get()))); break; case REMOVED: addClassTitle(breakingChange); breakingChange.add(String.format("* `%s` was removed", MethodName.name(method.getOldMethod().get()))); break; case MODIFIED: if (!checkReturnType){ if 
(!method.getOldMethod().get().getLongName().equals(method.getNewMethod().get().getLongName())) { addClassTitle(breakingChange); breakingChange.add(String.format("* `%s` -> `%s`", MethodName.name(method.getOldMethod().get()), MethodName.name(method.getNewMethod().get()))); } } else { addClassTitle(breakingChange); breakingChange.add(String.format("* `%s %s` -> `%s %s`", method.getReturnType().getOldReturnType(), MethodName.name(method.getOldMethod().get()), method.getReturnType().getNewReturnType(), MethodName.name(method.getNewMethod().get()))); } break; } } }
Please add a space in between.
/**
 * Populates the MVC model with the authenticated user's details.
 *
 * @param model the MVC model to populate
 * @param token the OAuth2 authentication token; nothing is added when it is {@code null}
 */
private void initializeModel(Model model, OAuth2AuthenticationToken token) {
    if (token != null) {
        final OAuth2User user = token.getPrincipal();
        model.addAttribute("grant_type", user.getAuthorities());
        model.addAllAttributes(user.getAttributes());
        // Space added after the comma per code style.
        model.addAttribute("name", user.getName());
    }
}
model.addAttribute("name",user.getName());
/**
 * Populates the MVC model with the authenticated user's details.
 *
 * @param model the MVC model to populate
 * @param token the OAuth2 authentication token; nothing is added when it is {@code null}
 */
private void initializeModel(Model model, OAuth2AuthenticationToken token) {
    if (token == null) {
        return;
    }
    final OAuth2User principal = token.getPrincipal();
    model.addAttribute("grant_type", principal.getAuthorities());
    model.addAllAttributes(principal.getAttributes());
    model.addAttribute("name", principal.getName());
}
class WebController {
    /** Application root — renders the home view. */
    @GetMapping(value = "/")
    public String index(Model model, OAuth2AuthenticationToken token) {
        return render(model, token, "home");
    }

    /** Renders the greeting view. */
    @GetMapping(value = "/greeting")
    public String greeting(Model model, OAuth2AuthenticationToken token) {
        return render(model, token, "greeting");
    }

    /** Renders the home view. */
    @GetMapping(value = "/home")
    public String home(Model model, OAuth2AuthenticationToken token) {
        return render(model, token, "home");
    }

    /** Shared handler body: populate the model from the token, then return the view name. */
    private String render(Model model, OAuth2AuthenticationToken token, String view) {
        initializeModel(model, token);
        return view;
    }
}
class WebController {
    /** Serves the home page at the application root. */
    @GetMapping("/")
    public String index(Model model, OAuth2AuthenticationToken token) {
        initializeModel(model, token);
        return "home";
    }

    /** Serves the greeting page. */
    @GetMapping("/greeting")
    public String greeting(Model model, OAuth2AuthenticationToken token) {
        initializeModel(model, token);
        return "greeting";
    }

    /** Serves the home page. */
    @GetMapping("/home")
    public String home(Model model, OAuth2AuthenticationToken token) {
        initializeModel(model, token);
        return "home";
    }
}
Is it okay to put a certificate in this file, from a security point of view?
/**
 * Retrieves the signing certificate used for the isolated attestation instance.
 * <p>
 * NOTE: the embedded fallback is a self-signed certificate created purely as test
 * collateral; per the PR discussion it protects no live resources.
 *
 * @return base64 encoded X.509 certificate used to sign policy documents.
 */
String getIsolatedSigningCertificate() {
    // Configuration#get(String, T) returns the supplied default when the key is unset,
    // which replaces the explicit null check.
    return Configuration.getGlobalConfiguration().get("isolatedSigningCertificate",
        "MIIC+DCCAeCgAwIBAgIITwYg6gewUZswDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoQXR0ZXN0YXRpb25Jc"
            + "29sYXRlZE1hbmFnZW1lbnRDZXJ0aWZpY2F0ZTAeFw0yMTAxMTkyMDEyNTZaFw0yMjAxMTkyMDEyNTZaMDMxMTAvBgNVBAMTK"
            + "EF0dGVzdGF0aW9uSXNvbGF0ZWRNYW5hZ2VtZW50Q2VydGlmaWNhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBA"
            + "QDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4W"
            + "sI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIxveLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXz"
            + "bHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1fzM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025h"
            + "JaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl8mnNAqRG55iRY4AqzLRAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIh"
            + "vcNAQELBQADggEBAJzbrs1pGiT6wwApfqT8jAM5OD9ylh8U9MCJOnMbigFAdp96N+TX568NUGPIssFB2oNNqI/Ai2hovPhdC"
            + "gDuPY2ngj2t9qyBhpqnQ0JWJ/Hpl4fZfbma9O9V18z9nLDmbOvbDNm11n1txZlwd+/h8Fh4CpXePhTWK2LIMYZ6WNBRRsanl"
            + "kF83yGFWMCShNqUiMGd9sWkRaaeJY9KtXxecQB3a/+SHKV2OESfA7inT3MXpwzCWAogrOk4GxzyWNPpsU7gHgErsiw+lKF8B"
            + "KrCArm0UjKvqhKeDni2zhWTYSQS2NLWnQwNvkxVdgdCl1lqtPeJ/qYPR8ZA+ksm36c7hBQ=");
}
signingCertificate = "MIIC+DCCAeCgAwIBAgIITwYg6gewUZswDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoQXR0ZXN0YXRpb25Jc"
String getIsolatedSigningCertificate() { String signingCertificate = Configuration.getGlobalConfiguration().get("isolatedSigningCertificate"); if (signingCertificate == null) { signingCertificate = "MIIC+DCCAeCgAwIBAgIITwYg6gewUZswDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoQXR0ZXN0YXRpb25Jc" + "29sYXRlZE1hbmFnZW1lbnRDZXJ0aWZpY2F0ZTAeFw0yMTAxMTkyMDEyNTZaFw0yMjAxMTkyMDEyNTZaMDMxMTAvBgNVBAMTK" + "EF0dGVzdGF0aW9uSXNvbGF0ZWRNYW5hZ2VtZW50Q2VydGlmaWNhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBA" + "QDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4W" + "sI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIxveLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXz" + "bHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1fzM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025h" + "JaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl8mnNAqRG55iRY4AqzLRAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIh" + "vcNAQELBQADggEBAJzbrs1pGiT6wwApfqT8jAM5OD9ylh8U9MCJOnMbigFAdp96N+TX568NUGPIssFB2oNNqI/Ai2hovPhdC" + "gDuPY2ngj2t9qyBhpqnQ0JWJ/Hpl4fZfbma9O9V18z9nLDmbOvbDNm11n1txZlwd+/h8Fh4CpXePhTWK2LIMYZ6WNBRRsanl" + "kF83yGFWMCShNqUiMGd9sWkRaaeJY9KtXxecQB3a/+SHKV2OESfA7inT3MXpwzCWAogrOk4GxzyWNPpsU7gHgErsiw+lKF8B" + "KrCArm0UjKvqhKeDni2zhWTYSQS2NLWnQwNvkxVdgdCl1lqtPeJ/qYPR8ZA+ksm36c7hBQ="; } return signingCertificate; }
class AttestationClientTestBase extends TestBase { private static final String DATAPLANE_SCOPE = "https: final ClientLogger logger = new ClientLogger(AttestationClientTestBase.class); enum ClientTypes { SHARED, ISOLATED, AAD, } ClientTypes classifyClient(String clientUri) { assertNotNull(clientUri); String regionShortName = getLocationShortName(); String sharedUri = "https: if (sharedUri.equals(clientUri)) { return ClientTypes.SHARED; } else if (getIsolatedUrl().equals(clientUri)) { return ClientTypes.ISOLATED; } else if (getAadUrl().equals(clientUri)) { return ClientTypes.AAD; } throw new IllegalArgumentException(); } InputStream base64ToStream(String base64) { byte[] decoded = Base64.getDecoder().decode(base64); return new ByteArrayInputStream(decoded); } AttestationClientBuilder getBuilder(HttpClient httpClient, String clientUri) { return new AttestationClientBuilder().pipeline(getHttpPipeline(httpClient)).instanceUrl(clientUri); } HttpPipeline getHttpPipeline(HttpClient httpClient) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new EnvironmentCredentialBuilder().httpClient(httpClient).build(); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DATAPLANE_SCOPE)); } if (getTestMode() == TestMode.RECORD) { policies.add(interceptorManager.getRecordPolicy()); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient == null ? 
interceptorManager.getPlaybackClient() : httpClient) .build(); } Mono<JWTClaimsSet> verifyAttestationToken(HttpClient httpClient, String clientUri, String attestationToken) { final SignedJWT token; try { token = SignedJWT.parse(attestationToken); } catch (ParseException e) { return Mono.error(logger.logThrowableAsError(e)); } SignedJWT finalToken = token; return getSigningCertificateByKeyId(token, httpClient, clientUri) .handle((cert, sink) -> { final PublicKey key = cert.getPublicKey(); final RSAPublicKey rsaKey = (RSAPublicKey) key; final RSASSAVerifier verifier = new RSASSAVerifier(rsaKey); try { assertTrue(finalToken.verify(verifier)); } catch (JOSEException e) { sink.error(logger.logThrowableAsError(e)); return; } final JWTClaimsSet claims; try { claims = finalToken.getJWTClaimsSet(); } catch (ParseException e) { sink.error(logger.logThrowableAsError(e)); return; } assertNotNull(claims); sink.next(claims); }); } /** * Create a JWS Signer from the specified PKCS8 encoded signing key. * @param signingKeyBase64 Base64 encoded PKCS8 encoded RSA Private key. * @return JWSSigner created over the specified signing key. */ JWSSigner getJwsSigner(String signingKeyBase64) { byte[] signingKey = Base64.getDecoder().decode(signingKeyBase64); PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(signingKey); KeyFactory keyFactory; try { keyFactory = KeyFactory.getInstance("RSA"); } catch (NoSuchAlgorithmException e) { throw logger.logThrowableAsError(new RuntimeException(e)); } PrivateKey privateKey; try { privateKey = keyFactory.generatePrivate(keySpec); } catch (InvalidKeySpecException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } return new RSASSASigner(privateKey); } /** * Find the signing certificate associated with the specified SignedJWT token. * * This method depends on the token * @param token - MAA generated token on which to find the certificate. * @param client - Http Client used to retrieve signing certificates. 
* @param clientUri - Base URI for the attestation client. * @return X509Certificate which will have been used to sign the token. */ Mono<X509Certificate> getSigningCertificateByKeyId(SignedJWT token, HttpClient client, String clientUri) { AttestationClientBuilder builder = getBuilder(client, clientUri); return builder.buildSigningCertificatesAsyncClient().get() .handle((keySet, sink) -> { final CertificateFactory cf; try { cf = CertificateFactory.getInstance("X.509"); } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } final String keyId = token.getHeader().getKeyID(); boolean foundKey = false; for (JsonWebKey key : keySet.getKeys()) { if (keyId.equals(key.getKid())) { final Certificate cert; try { cert = cf.generateCertificate(base64ToStream(key.getX5C().get(0))); foundKey = true; } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } assertTrue(cert instanceof X509Certificate); sink.next((X509Certificate) cert); } } if (!foundKey) { sink.error(logger.logThrowableAsError(new RuntimeException(String.format( "Key %s not found in JSON Web Key Set", keyId)))); } }); } String getIsolatedSigningKey() { String signingKey = Configuration.getGlobalConfiguration().get("isolatedSigningKey"); if (signingKey == null) { signingKey = "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2" + "JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4WsI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIx" + "veLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXzbHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1f" + "zM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025hJaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl" + "8mnNAqRG55iRY4AqzLRAgMBAAECggEAU0dTJMLPXLnU47Fo8rch7WxDGR+uKPz5GKNkmSU9onvhlN0AZHt23kBbL9JKDusm" + "WI9bw+QmrFTQIqgBCVLA2X+6pZaBBUMfUAGxMV9yHDctSbzTYBFyj7d+tE2UW+Va8eVkrolakDKD7A9A1VvNyIwxH2hB+O1" + "gcJNN+f7q2FP4zpmJjEsMm9IL9sZ+6aiQSSsFQEih92yZEtHJ6Ohe8mdvSkmi3Ki0TSeqDfh4CksRnd6Bv/6oBAV48WaRa3" 
+ "yQ7tnsBrhXrCRzXRbiCcJP+C/Eqe3gkXvWuzq+cgicX95qh05VPnf5Pa6w5N4wEgwmoorloYfDStYcthtKidUefQKBgQD3h" + "WXciacPcydjAfH+0WyoszqBup/B5OBw/ZNlv531EzongB8V7+3pCs1/gF4+H3qvIRkL7JWt4HVtZEBp4D3tpWQHoYpE6wxA" + "0oeGM/DXbCQttCpR3eHZXYa9hbuQZuFjkclXjDBIk/q+U178+GRiB7zZb7JGNCBwlpCkTh+WywKBgQDhC2GnDCAGjwjDHa5" + "Nf4qLWyISN34KoEF9hgAYIvNYzAwwp8J/xxxQ7j8hf5XJPnld1UprVrhrYL0aGSc0kNWri1pZx2PDge42XK9boRARvuuK5U" + "aV3VNk7xb7vHzjoNDJWzmLlEaVPLFQPHVWHobTMwQWbzKZmopTA+QuV68NUwKBgQCbMmU/9n9tTIKxrZKSd7VtwZM5rE5nQ" + "J8JubUl4xOjir637bmQA7RknoVjIJX21b4S+Om/dEQVlduLD4Tj3dp2m3Ew57TOqaIxMtAO8ZpdOE0m6wRt+HWX2PCW/Lcy" + "P4+q4sofvqK3nzFlDNlOPGCUps1eeI6LPjvo3D8tBl8AKQKBgQCHhv8sRtUSnhk8yCcsbN7Wxe9i4SB67cADBCwSXRoII/pDY" + "wRzR0n6Q0Cpv9hI9eLJa6YBtpwhroSzruo5ce/7+1RSNQ4Ts6/t9St2Fy1CQqQ/ZYx4vG14n7RLrlvYCgUy/klNkeJgBckS9R" + "YE4yV3E4YmrJjggH1FOVa1wgCeGQKBgQCbCKeM4EahWIyTBiZsTQ/l5hwhjPCrxncbyA2EZWNg3Ri6nuMIQzoBrCoX9L8t7e0" + "CKWAN0oM2Cn1VIJhsiE75dzN3vvGBcNZ9y+BwbwxDIAhrztyKKJS0h9YmAUVr+w5WsUPyMUPQ0/1wdTdxvKqQpriddrvyKRSJ" + "M9fb29+cwQ=="; } return signingKey; } String getPolicySigningCertificate0() { String certificate = Configuration.getGlobalConfiguration().get("policySigningCertificate0"); if (certificate == null) { certificate = "MIIC1jCCAb6gAwIBAgIIAxfcH6Co5DowDQYJKoZIhvcNAQELBQAwIjEgMB4GA1UEAxMXQXR0ZXN0YXRpb25DZXJ" + "0aWZpY2F0ZTAwHhcNMjEwMTE5MjAxMjU2WhcNMjIwMTE5MjAxMjU2WjAiMSAwHgYDVQQDExdBdHRlc3RhdGlvbkNlcnRpZmlj" + "YXRlMDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOOpb5GvUCuOYiB4ZazIePtSazdXGDyjtFlr4ulo1VY1Ai91X" + "IcWIPELCV1OfQiIoJlj096u3cirP1GvCKgb4FTNHHi7omDaQvYRmuZZ6KXrqNi5Iu/jKjGgjwYt+FYV/9eqYCWdyS0RjMbKw7" + "sZUvBxTDeTqQunwbjPZ1y4JbxXx6xwcZJHfwD6g7aHslsblHh4zM1mhiuoIMpNUeeThLwQTD6oGSmIt+hqRbfvd3Ljr/v7W3m" + "SKvw5X9L85PNHaDIUd4vHSDiytZUoXyhtbC8RKGzxgZCz6gFwM5JF6QhYE/A84HFH7JZ3FKk1UJBoTjcv63BshT7Pt3fYMZqV" + "SzkCAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAC+YKGp6foV94lYQB/yNQ53Bk+YeR4dgAkR99U" + 
"t1VvvXKyWnka/X8QRjKoaPULDZH4+9ZXg6lSfhuEGTSPUzO294mlNbF6n6cuuuawe3OeuZUO53b4xYXPRv898FBRxdD+FCW/f" + "A5HLBrGItfk31+aNUFryCd5RrJfJU8Rurm+7uGPtS16Ft0P7xSnL0C7nfHNVuEKFV0ZbzgzXlzkKQT4d3fYpvOxzYoXImxzwz" + "W/jzZjN3aKbOlmY2LyW8J5BKKgA3C4FRWwCTmgqYp2vQhsw1HgCeBjmBN5/imnk2lsgjrvvSdlkXOnNf5atibuguYzdakz99b" + "wwWWsd5HddtcyA=="; } return certificate; } private static String getLocationShortName() { String shortName = Configuration.getGlobalConfiguration().get("locationShortName"); if (shortName == null) { shortName = "wus"; } return shortName; } private static String getIsolatedUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_ISOLATED_URL"); if (url == null) { url = "https: } return url; } private static String getAadUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_AAD_URL"); if (url == null) { url = "https: } return url; } static Stream<Arguments> getAttestationClients() { final String regionShortName = getLocationShortName(); return getHttpClients().flatMap(httpClient -> Stream.of( Arguments.of(httpClient, "https: Arguments.of(httpClient, getIsolatedUrl()), Arguments.of(httpClient, getAadUrl()))); } static Stream<Arguments> getPolicyClients() { return getAttestationClients().flatMap(clientParams -> Stream.of( Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.OPEN_ENCLAVE), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.TPM), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.SGX_ENCLAVE))); } }
class AttestationClientTestBase extends TestBase { private static final String DATAPLANE_SCOPE = "https: final ClientLogger logger = new ClientLogger(AttestationClientTestBase.class); enum ClientTypes { SHARED, ISOLATED, AAD, } /** * Determine the Attestation instance type based on the client URI provided. * @param clientUri - URI for the attestation client. * @return the ClientTypes corresponding to the specified client URI. */ ClientTypes classifyClient(String clientUri) { assertNotNull(clientUri); String regionShortName = getLocationShortName(); String sharedUri = "https: if (sharedUri.equals(clientUri)) { return ClientTypes.SHARED; } else if (getIsolatedUrl().equals(clientUri)) { return ClientTypes.ISOLATED; } else if (getAadUrl().equals(clientUri)) { return ClientTypes.AAD; } throw new IllegalArgumentException(); } /** * Convert a base64 encoded string into a byte stream. * @param base64 - Base64 encoded string to be decoded * @return stream of bytes encoded in the base64 encoded string. */ InputStream base64ToStream(String base64) { byte[] decoded = Base64.getDecoder().decode(base64); return new ByteArrayInputStream(decoded); } /** * Retrieve an attestationClientBuilder for the specified HTTP client and client URI * @param httpClient - HTTP client ot be used for the attestation client. * @param clientUri - Client base URI to access the service. * @return Returns an attestation client builder corresponding to the httpClient and clientUri. */ AttestationClientBuilder getBuilder(HttpClient httpClient, String clientUri) { return new AttestationClientBuilder().pipeline(getHttpPipeline(httpClient)).instanceUrl(clientUri); } /** * Retrieves an HTTP pipeline configured on the specified HTTP pipeline. * * Used by getBuilder(). * @param httpClient - Client on which to configure the HTTP pipeline. * @return an HttpPipeline object configured for the MAA service on the specified http client. 
*/ private HttpPipeline getHttpPipeline(HttpClient httpClient) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new EnvironmentCredentialBuilder().httpClient(httpClient).build(); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DATAPLANE_SCOPE)); } if (getTestMode() == TestMode.RECORD) { policies.add(interceptorManager.getRecordPolicy()); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .build(); } /** * Verifies an MAA attestation token and returns the set of attestation claims embedded in the token. * @param httpClient - the HTTP client which was used to retrieve the token (used to retrieve the signing certificates for the attestation instance) * @param clientUri - the base URI used to access the attestation instance (used to retrieve the signing certificates for the attestation instance) * @param attestationToken - Json Web Token issued by the Attestation Service. * @return a JWTClaimSet containing the claims associated with the attestation token. 
*/ Mono<JWTClaimsSet> verifyAttestationToken(HttpClient httpClient, String clientUri, String attestationToken) { final SignedJWT token; try { token = SignedJWT.parse(attestationToken); } catch (ParseException e) { return Mono.error(logger.logThrowableAsError(e)); } SignedJWT finalToken = token; return getSigningCertificateByKeyId(token, httpClient, clientUri) .handle((cert, sink) -> { final PublicKey key = cert.getPublicKey(); final RSAPublicKey rsaKey = (RSAPublicKey) key; final RSASSAVerifier verifier = new RSASSAVerifier(rsaKey); try { assertTrue(finalToken.verify(verifier)); } catch (JOSEException e) { sink.error(logger.logThrowableAsError(e)); return; } final JWTClaimsSet claims; try { claims = finalToken.getJWTClaimsSet(); } catch (ParseException e) { sink.error(logger.logThrowableAsError(e)); return; } assertNotNull(claims); sink.next(claims); }); } /** * Create a JWS Signer from the specified PKCS8 encoded signing key. * @param signingKeyBase64 Base64 encoded PKCS8 encoded RSA Private key. * @return JWSSigner created over the specified signing key. */ JWSSigner getJwsSigner(String signingKeyBase64) { byte[] signingKey = Base64.getDecoder().decode(signingKeyBase64); PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(signingKey); KeyFactory keyFactory; try { keyFactory = KeyFactory.getInstance("RSA"); } catch (NoSuchAlgorithmException e) { throw logger.logThrowableAsError(new RuntimeException(e)); } PrivateKey privateKey; try { privateKey = keyFactory.generatePrivate(keySpec); } catch (InvalidKeySpecException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } return new RSASSASigner(privateKey); } /** * Find the signing certificate associated with the specified SignedJWT token. * * This method depends on the token * @param token - MAA generated token on which to find the certificate. * @param client - Http Client used to retrieve signing certificates. * @param clientUri - Base URI for the attestation client. 
* @return X509Certificate which will have been used to sign the token. */ Mono<X509Certificate> getSigningCertificateByKeyId(SignedJWT token, HttpClient client, String clientUri) { AttestationClientBuilder builder = getBuilder(client, clientUri); return builder.buildSigningCertificatesAsyncClient().get() .handle((keySet, sink) -> { final CertificateFactory cf; try { cf = CertificateFactory.getInstance("X.509"); } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } final String keyId = token.getHeader().getKeyID(); boolean foundKey = false; for (JsonWebKey key : keySet.getKeys()) { if (keyId.equals(key.getKid())) { final Certificate cert; try { cert = cf.generateCertificate(base64ToStream(key.getX5C().get(0))); foundKey = true; } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } assertTrue(cert instanceof X509Certificate); sink.next((X509Certificate) cert); } } if (!foundKey) { sink.error(logger.logThrowableAsError(new RuntimeException(String.format( "Key %s not found in JSON Web Key Set", keyId)))); } }); } /** * Retrieve the signing certificate used for the isolated attestation instance. * @return Returns a base64 encoded X.509 certificate used to sign policy documents. */ /** * Retrieve the signing key used for the isolated attestation instance. * @return Returns a base64 encoded RSA Key used to sign policy documents. 
*/ String getIsolatedSigningKey() { String signingKey = Configuration.getGlobalConfiguration().get("isolatedSigningKey"); if (signingKey == null) { signingKey = "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2" + "JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4WsI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIx" + "veLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXzbHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1f" + "zM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025hJaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl" + "8mnNAqRG55iRY4AqzLRAgMBAAECggEAU0dTJMLPXLnU47Fo8rch7WxDGR+uKPz5GKNkmSU9onvhlN0AZHt23kBbL9JKDusm" + "WI9bw+QmrFTQIqgBCVLA2X+6pZaBBUMfUAGxMV9yHDctSbzTYBFyj7d+tE2UW+Va8eVkrolakDKD7A9A1VvNyIwxH2hB+O1" + "gcJNN+f7q2FP4zpmJjEsMm9IL9sZ+6aiQSSsFQEih92yZEtHJ6Ohe8mdvSkmi3Ki0TSeqDfh4CksRnd6Bv/6oBAV48WaRa3" + "yQ7tnsBrhXrCRzXRbiCcJP+C/Eqe3gkXvWuzq+cgicX95qh05VPnf5Pa6w5N4wEgwmoorloYfDStYcthtKidUefQKBgQD3h" + "WXciacPcydjAfH+0WyoszqBup/B5OBw/ZNlv531EzongB8V7+3pCs1/gF4+H3qvIRkL7JWt4HVtZEBp4D3tpWQHoYpE6wxA" + "0oeGM/DXbCQttCpR3eHZXYa9hbuQZuFjkclXjDBIk/q+U178+GRiB7zZb7JGNCBwlpCkTh+WywKBgQDhC2GnDCAGjwjDHa5" + "Nf4qLWyISN34KoEF9hgAYIvNYzAwwp8J/xxxQ7j8hf5XJPnld1UprVrhrYL0aGSc0kNWri1pZx2PDge42XK9boRARvuuK5U" + "aV3VNk7xb7vHzjoNDJWzmLlEaVPLFQPHVWHobTMwQWbzKZmopTA+QuV68NUwKBgQCbMmU/9n9tTIKxrZKSd7VtwZM5rE5nQ" + "J8JubUl4xOjir637bmQA7RknoVjIJX21b4S+Om/dEQVlduLD4Tj3dp2m3Ew57TOqaIxMtAO8ZpdOE0m6wRt+HWX2PCW/Lcy" + "P4+q4sofvqK3nzFlDNlOPGCUps1eeI6LPjvo3D8tBl8AKQKBgQCHhv8sRtUSnhk8yCcsbN7Wxe9i4SB67cADBCwSXRoII/pDY" + "wRzR0n6Q0Cpv9hI9eLJa6YBtpwhroSzruo5ce/7+1RSNQ4Ts6/t9St2Fy1CQqQ/ZYx4vG14n7RLrlvYCgUy/klNkeJgBckS9R" + "YE4yV3E4YmrJjggH1FOVa1wgCeGQKBgQCbCKeM4EahWIyTBiZsTQ/l5hwhjPCrxncbyA2EZWNg3Ri6nuMIQzoBrCoX9L8t7e0" + "CKWAN0oM2Cn1VIJhsiE75dzN3vvGBcNZ9y+BwbwxDIAhrztyKKJS0h9YmAUVr+w5WsUPyMUPQ0/1wdTdxvKqQpriddrvyKRSJ" + "M9fb29+cwQ=="; } return signingKey; } /** * Retrieves a certificate which can be used to sign attestation policies. 
* @return Returns a base64 encoded X.509 certificate which can be used to sign attestation policies. */ String getPolicySigningCertificate0() { String certificate = Configuration.getGlobalConfiguration().get("policySigningCertificate0"); if (certificate == null) { certificate = "MIIC1jCCAb6gAwIBAgIIAxfcH6Co5DowDQYJKoZIhvcNAQELBQAwIjEgMB4GA1UEAxMXQXR0ZXN0YXRpb25DZXJ" + "0aWZpY2F0ZTAwHhcNMjEwMTE5MjAxMjU2WhcNMjIwMTE5MjAxMjU2WjAiMSAwHgYDVQQDExdBdHRlc3RhdGlvbkNlcnRpZmlj" + "YXRlMDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOOpb5GvUCuOYiB4ZazIePtSazdXGDyjtFlr4ulo1VY1Ai91X" + "IcWIPELCV1OfQiIoJlj096u3cirP1GvCKgb4FTNHHi7omDaQvYRmuZZ6KXrqNi5Iu/jKjGgjwYt+FYV/9eqYCWdyS0RjMbKw7" + "sZUvBxTDeTqQunwbjPZ1y4JbxXx6xwcZJHfwD6g7aHslsblHh4zM1mhiuoIMpNUeeThLwQTD6oGSmIt+hqRbfvd3Ljr/v7W3m" + "SKvw5X9L85PNHaDIUd4vHSDiytZUoXyhtbC8RKGzxgZCz6gFwM5JF6QhYE/A84HFH7JZ3FKk1UJBoTjcv63BshT7Pt3fYMZqV" + "SzkCAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAC+YKGp6foV94lYQB/yNQ53Bk+YeR4dgAkR99U" + "t1VvvXKyWnka/X8QRjKoaPULDZH4+9ZXg6lSfhuEGTSPUzO294mlNbF6n6cuuuawe3OeuZUO53b4xYXPRv898FBRxdD+FCW/f" + "A5HLBrGItfk31+aNUFryCd5RrJfJU8Rurm+7uGPtS16Ft0P7xSnL0C7nfHNVuEKFV0ZbzgzXlzkKQT4d3fYpvOxzYoXImxzwz" + "W/jzZjN3aKbOlmY2LyW8J5BKKgA3C4FRWwCTmgqYp2vQhsw1HgCeBjmBN5/imnk2lsgjrvvSdlkXOnNf5atibuguYzdakz99b" + "wwWWsd5HddtcyA=="; } return certificate; } /** * Returns the location in which the tests are running. * @return returns the location in which the tests are running. */ private static String getLocationShortName() { String shortName = Configuration.getGlobalConfiguration().get("locationShortName"); if (shortName == null) { shortName = "wus"; } return shortName; } /** * Returns the url associated with the isolated MAA instance. * @return the url associated with the isolated MAA instance. 
*/ private static String getIsolatedUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_ISOLATED_URL"); if (url == null) { url = "https: } return url; } /** * Returns the url associated with the AAD MAA instance. * @return the url associated with the AAD MAA instance. */ private static String getAadUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_AAD_URL"); if (url == null) { url = "https: } return url; } /** * Returns the set of clients to be used to test the attestation service. * @return a stream of Argument objects associated with each of the regions on which to run the attestation test. */ static Stream<Arguments> getAttestationClients() { final String regionShortName = getLocationShortName(); return getHttpClients().flatMap(httpClient -> Stream.of( Arguments.of(httpClient, "https: Arguments.of(httpClient, getIsolatedUrl()), Arguments.of(httpClient, getAadUrl()))); } /** * Returns the set of clients and attestation types used for attestation policy APIs. * @return a stream of Argument objects associated with each of the regions on which to run the attestation test. */ static Stream<Arguments> getPolicyClients() { return getAttestationClients().flatMap(clientParams -> Stream.of( Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.OPEN_ENCLAVE), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.TPM), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.SGX_ENCLAVE))); } }
The certificate has to go somewhere. It's either going to be in the recorded session or in the source code, but the service requires that the client present a valid X.509 certificate. So we either don't bother testing most of the service functionality in playback tests, or we add the certificate (and its associated key). In this case, it's a self-signed certificate wrapped around a generated RSA key pair (that's the key returned by `getIsolatedSigningKey`, which was generated for this purpose). It's just a certificate and a key — they don't protect actual resources. (Technically they were the isolated signing key for an attestation service instance I used for testing when I created these tests, but I'm not sure that, even with that caveat, they have any practical value — I already deleted that instance.)
/**
 * Retrieve the signing certificate used for the isolated attestation instance.
 *
 * @return Returns a base64 encoded X.509 certificate used to sign policy documents.
 */
String getIsolatedSigningCertificate() {
    final String configured = Configuration.getGlobalConfiguration().get("isolatedSigningCertificate");
    if (configured != null) {
        return configured;
    }
    // Hard-coded fallback: a self-signed certificate generated purely as test collateral
    // (per the review discussion it guards no real resources).
    return "MIIC+DCCAeCgAwIBAgIITwYg6gewUZswDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoQXR0ZXN0YXRpb25Jc"
        + "29sYXRlZE1hbmFnZW1lbnRDZXJ0aWZpY2F0ZTAeFw0yMTAxMTkyMDEyNTZaFw0yMjAxMTkyMDEyNTZaMDMxMTAvBgNVBAMTK"
        + "EF0dGVzdGF0aW9uSXNvbGF0ZWRNYW5hZ2VtZW50Q2VydGlmaWNhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBA"
        + "QDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4W"
        + "sI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIxveLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXz"
        + "bHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1fzM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025h"
        + "JaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl8mnNAqRG55iRY4AqzLRAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIh"
        + "vcNAQELBQADggEBAJzbrs1pGiT6wwApfqT8jAM5OD9ylh8U9MCJOnMbigFAdp96N+TX568NUGPIssFB2oNNqI/Ai2hovPhdC"
        + "gDuPY2ngj2t9qyBhpqnQ0JWJ/Hpl4fZfbma9O9V18z9nLDmbOvbDNm11n1txZlwd+/h8Fh4CpXePhTWK2LIMYZ6WNBRRsanl"
        + "kF83yGFWMCShNqUiMGd9sWkRaaeJY9KtXxecQB3a/+SHKV2OESfA7inT3MXpwzCWAogrOk4GxzyWNPpsU7gHgErsiw+lKF8B"
        + "KrCArm0UjKvqhKeDni2zhWTYSQS2NLWnQwNvkxVdgdCl1lqtPeJ/qYPR8ZA+ksm36c7hBQ=";
}
signingCertificate = "MIIC+DCCAeCgAwIBAgIITwYg6gewUZswDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoQXR0ZXN0YXRpb25Jc"
String getIsolatedSigningCertificate() { String signingCertificate = Configuration.getGlobalConfiguration().get("isolatedSigningCertificate"); if (signingCertificate == null) { signingCertificate = "MIIC+DCCAeCgAwIBAgIITwYg6gewUZswDQYJKoZIhvcNAQELBQAwMzExMC8GA1UEAxMoQXR0ZXN0YXRpb25Jc" + "29sYXRlZE1hbmFnZW1lbnRDZXJ0aWZpY2F0ZTAeFw0yMTAxMTkyMDEyNTZaFw0yMjAxMTkyMDEyNTZaMDMxMTAvBgNVBAMTK" + "EF0dGVzdGF0aW9uSXNvbGF0ZWRNYW5hZ2VtZW50Q2VydGlmaWNhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBA" + "QDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4W" + "sI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIxveLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXz" + "bHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1fzM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025h" + "JaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl8mnNAqRG55iRY4AqzLRAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIh" + "vcNAQELBQADggEBAJzbrs1pGiT6wwApfqT8jAM5OD9ylh8U9MCJOnMbigFAdp96N+TX568NUGPIssFB2oNNqI/Ai2hovPhdC" + "gDuPY2ngj2t9qyBhpqnQ0JWJ/Hpl4fZfbma9O9V18z9nLDmbOvbDNm11n1txZlwd+/h8Fh4CpXePhTWK2LIMYZ6WNBRRsanl" + "kF83yGFWMCShNqUiMGd9sWkRaaeJY9KtXxecQB3a/+SHKV2OESfA7inT3MXpwzCWAogrOk4GxzyWNPpsU7gHgErsiw+lKF8B" + "KrCArm0UjKvqhKeDni2zhWTYSQS2NLWnQwNvkxVdgdCl1lqtPeJ/qYPR8ZA+ksm36c7hBQ="; } return signingCertificate; }
class AttestationClientTestBase extends TestBase { private static final String DATAPLANE_SCOPE = "https: final ClientLogger logger = new ClientLogger(AttestationClientTestBase.class); enum ClientTypes { SHARED, ISOLATED, AAD, } ClientTypes classifyClient(String clientUri) { assertNotNull(clientUri); String regionShortName = getLocationShortName(); String sharedUri = "https: if (sharedUri.equals(clientUri)) { return ClientTypes.SHARED; } else if (getIsolatedUrl().equals(clientUri)) { return ClientTypes.ISOLATED; } else if (getAadUrl().equals(clientUri)) { return ClientTypes.AAD; } throw new IllegalArgumentException(); } InputStream base64ToStream(String base64) { byte[] decoded = Base64.getDecoder().decode(base64); return new ByteArrayInputStream(decoded); } AttestationClientBuilder getBuilder(HttpClient httpClient, String clientUri) { return new AttestationClientBuilder().pipeline(getHttpPipeline(httpClient)).instanceUrl(clientUri); } HttpPipeline getHttpPipeline(HttpClient httpClient) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new EnvironmentCredentialBuilder().httpClient(httpClient).build(); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DATAPLANE_SCOPE)); } if (getTestMode() == TestMode.RECORD) { policies.add(interceptorManager.getRecordPolicy()); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient == null ? 
interceptorManager.getPlaybackClient() : httpClient) .build(); } Mono<JWTClaimsSet> verifyAttestationToken(HttpClient httpClient, String clientUri, String attestationToken) { final SignedJWT token; try { token = SignedJWT.parse(attestationToken); } catch (ParseException e) { return Mono.error(logger.logThrowableAsError(e)); } SignedJWT finalToken = token; return getSigningCertificateByKeyId(token, httpClient, clientUri) .handle((cert, sink) -> { final PublicKey key = cert.getPublicKey(); final RSAPublicKey rsaKey = (RSAPublicKey) key; final RSASSAVerifier verifier = new RSASSAVerifier(rsaKey); try { assertTrue(finalToken.verify(verifier)); } catch (JOSEException e) { sink.error(logger.logThrowableAsError(e)); return; } final JWTClaimsSet claims; try { claims = finalToken.getJWTClaimsSet(); } catch (ParseException e) { sink.error(logger.logThrowableAsError(e)); return; } assertNotNull(claims); sink.next(claims); }); } /** * Create a JWS Signer from the specified PKCS8 encoded signing key. * @param signingKeyBase64 Base64 encoded PKCS8 encoded RSA Private key. * @return JWSSigner created over the specified signing key. */ JWSSigner getJwsSigner(String signingKeyBase64) { byte[] signingKey = Base64.getDecoder().decode(signingKeyBase64); PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(signingKey); KeyFactory keyFactory; try { keyFactory = KeyFactory.getInstance("RSA"); } catch (NoSuchAlgorithmException e) { throw logger.logThrowableAsError(new RuntimeException(e)); } PrivateKey privateKey; try { privateKey = keyFactory.generatePrivate(keySpec); } catch (InvalidKeySpecException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } return new RSASSASigner(privateKey); } /** * Find the signing certificate associated with the specified SignedJWT token. * * This method depends on the token * @param token - MAA generated token on which to find the certificate. * @param client - Http Client used to retrieve signing certificates. 
* @param clientUri - Base URI for the attestation client. * @return X509Certificate which will have been used to sign the token. */ Mono<X509Certificate> getSigningCertificateByKeyId(SignedJWT token, HttpClient client, String clientUri) { AttestationClientBuilder builder = getBuilder(client, clientUri); return builder.buildSigningCertificatesAsyncClient().get() .handle((keySet, sink) -> { final CertificateFactory cf; try { cf = CertificateFactory.getInstance("X.509"); } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } final String keyId = token.getHeader().getKeyID(); boolean foundKey = false; for (JsonWebKey key : keySet.getKeys()) { if (keyId.equals(key.getKid())) { final Certificate cert; try { cert = cf.generateCertificate(base64ToStream(key.getX5C().get(0))); foundKey = true; } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } assertTrue(cert instanceof X509Certificate); sink.next((X509Certificate) cert); } } if (!foundKey) { sink.error(logger.logThrowableAsError(new RuntimeException(String.format( "Key %s not found in JSON Web Key Set", keyId)))); } }); } String getIsolatedSigningKey() { String signingKey = Configuration.getGlobalConfiguration().get("isolatedSigningKey"); if (signingKey == null) { signingKey = "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2" + "JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4WsI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIx" + "veLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXzbHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1f" + "zM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025hJaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl" + "8mnNAqRG55iRY4AqzLRAgMBAAECggEAU0dTJMLPXLnU47Fo8rch7WxDGR+uKPz5GKNkmSU9onvhlN0AZHt23kBbL9JKDusm" + "WI9bw+QmrFTQIqgBCVLA2X+6pZaBBUMfUAGxMV9yHDctSbzTYBFyj7d+tE2UW+Va8eVkrolakDKD7A9A1VvNyIwxH2hB+O1" + "gcJNN+f7q2FP4zpmJjEsMm9IL9sZ+6aiQSSsFQEih92yZEtHJ6Ohe8mdvSkmi3Ki0TSeqDfh4CksRnd6Bv/6oBAV48WaRa3" 
+ "yQ7tnsBrhXrCRzXRbiCcJP+C/Eqe3gkXvWuzq+cgicX95qh05VPnf5Pa6w5N4wEgwmoorloYfDStYcthtKidUefQKBgQD3h" + "WXciacPcydjAfH+0WyoszqBup/B5OBw/ZNlv531EzongB8V7+3pCs1/gF4+H3qvIRkL7JWt4HVtZEBp4D3tpWQHoYpE6wxA" + "0oeGM/DXbCQttCpR3eHZXYa9hbuQZuFjkclXjDBIk/q+U178+GRiB7zZb7JGNCBwlpCkTh+WywKBgQDhC2GnDCAGjwjDHa5" + "Nf4qLWyISN34KoEF9hgAYIvNYzAwwp8J/xxxQ7j8hf5XJPnld1UprVrhrYL0aGSc0kNWri1pZx2PDge42XK9boRARvuuK5U" + "aV3VNk7xb7vHzjoNDJWzmLlEaVPLFQPHVWHobTMwQWbzKZmopTA+QuV68NUwKBgQCbMmU/9n9tTIKxrZKSd7VtwZM5rE5nQ" + "J8JubUl4xOjir637bmQA7RknoVjIJX21b4S+Om/dEQVlduLD4Tj3dp2m3Ew57TOqaIxMtAO8ZpdOE0m6wRt+HWX2PCW/Lcy" + "P4+q4sofvqK3nzFlDNlOPGCUps1eeI6LPjvo3D8tBl8AKQKBgQCHhv8sRtUSnhk8yCcsbN7Wxe9i4SB67cADBCwSXRoII/pDY" + "wRzR0n6Q0Cpv9hI9eLJa6YBtpwhroSzruo5ce/7+1RSNQ4Ts6/t9St2Fy1CQqQ/ZYx4vG14n7RLrlvYCgUy/klNkeJgBckS9R" + "YE4yV3E4YmrJjggH1FOVa1wgCeGQKBgQCbCKeM4EahWIyTBiZsTQ/l5hwhjPCrxncbyA2EZWNg3Ri6nuMIQzoBrCoX9L8t7e0" + "CKWAN0oM2Cn1VIJhsiE75dzN3vvGBcNZ9y+BwbwxDIAhrztyKKJS0h9YmAUVr+w5WsUPyMUPQ0/1wdTdxvKqQpriddrvyKRSJ" + "M9fb29+cwQ=="; } return signingKey; } String getPolicySigningCertificate0() { String certificate = Configuration.getGlobalConfiguration().get("policySigningCertificate0"); if (certificate == null) { certificate = "MIIC1jCCAb6gAwIBAgIIAxfcH6Co5DowDQYJKoZIhvcNAQELBQAwIjEgMB4GA1UEAxMXQXR0ZXN0YXRpb25DZXJ" + "0aWZpY2F0ZTAwHhcNMjEwMTE5MjAxMjU2WhcNMjIwMTE5MjAxMjU2WjAiMSAwHgYDVQQDExdBdHRlc3RhdGlvbkNlcnRpZmlj" + "YXRlMDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOOpb5GvUCuOYiB4ZazIePtSazdXGDyjtFlr4ulo1VY1Ai91X" + "IcWIPELCV1OfQiIoJlj096u3cirP1GvCKgb4FTNHHi7omDaQvYRmuZZ6KXrqNi5Iu/jKjGgjwYt+FYV/9eqYCWdyS0RjMbKw7" + "sZUvBxTDeTqQunwbjPZ1y4JbxXx6xwcZJHfwD6g7aHslsblHh4zM1mhiuoIMpNUeeThLwQTD6oGSmIt+hqRbfvd3Ljr/v7W3m" + "SKvw5X9L85PNHaDIUd4vHSDiytZUoXyhtbC8RKGzxgZCz6gFwM5JF6QhYE/A84HFH7JZ3FKk1UJBoTjcv63BshT7Pt3fYMZqV" + "SzkCAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAC+YKGp6foV94lYQB/yNQ53Bk+YeR4dgAkR99U" + 
"t1VvvXKyWnka/X8QRjKoaPULDZH4+9ZXg6lSfhuEGTSPUzO294mlNbF6n6cuuuawe3OeuZUO53b4xYXPRv898FBRxdD+FCW/f" + "A5HLBrGItfk31+aNUFryCd5RrJfJU8Rurm+7uGPtS16Ft0P7xSnL0C7nfHNVuEKFV0ZbzgzXlzkKQT4d3fYpvOxzYoXImxzwz" + "W/jzZjN3aKbOlmY2LyW8J5BKKgA3C4FRWwCTmgqYp2vQhsw1HgCeBjmBN5/imnk2lsgjrvvSdlkXOnNf5atibuguYzdakz99b" + "wwWWsd5HddtcyA=="; } return certificate; } private static String getLocationShortName() { String shortName = Configuration.getGlobalConfiguration().get("locationShortName"); if (shortName == null) { shortName = "wus"; } return shortName; } private static String getIsolatedUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_ISOLATED_URL"); if (url == null) { url = "https: } return url; } private static String getAadUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_AAD_URL"); if (url == null) { url = "https: } return url; } static Stream<Arguments> getAttestationClients() { final String regionShortName = getLocationShortName(); return getHttpClients().flatMap(httpClient -> Stream.of( Arguments.of(httpClient, "https: Arguments.of(httpClient, getIsolatedUrl()), Arguments.of(httpClient, getAadUrl()))); } static Stream<Arguments> getPolicyClients() { return getAttestationClients().flatMap(clientParams -> Stream.of( Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.OPEN_ENCLAVE), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.TPM), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.SGX_ENCLAVE))); } }
class AttestationClientTestBase extends TestBase { private static final String DATAPLANE_SCOPE = "https: final ClientLogger logger = new ClientLogger(AttestationClientTestBase.class); enum ClientTypes { SHARED, ISOLATED, AAD, } /** * Determine the Attestation instance type based on the client URI provided. * @param clientUri - URI for the attestation client. * @return the ClientTypes corresponding to the specified client URI. */ ClientTypes classifyClient(String clientUri) { assertNotNull(clientUri); String regionShortName = getLocationShortName(); String sharedUri = "https: if (sharedUri.equals(clientUri)) { return ClientTypes.SHARED; } else if (getIsolatedUrl().equals(clientUri)) { return ClientTypes.ISOLATED; } else if (getAadUrl().equals(clientUri)) { return ClientTypes.AAD; } throw new IllegalArgumentException(); } /** * Convert a base64 encoded string into a byte stream. * @param base64 - Base64 encoded string to be decoded * @return stream of bytes encoded in the base64 encoded string. */ InputStream base64ToStream(String base64) { byte[] decoded = Base64.getDecoder().decode(base64); return new ByteArrayInputStream(decoded); } /** * Retrieve an attestationClientBuilder for the specified HTTP client and client URI * @param httpClient - HTTP client ot be used for the attestation client. * @param clientUri - Client base URI to access the service. * @return Returns an attestation client builder corresponding to the httpClient and clientUri. */ AttestationClientBuilder getBuilder(HttpClient httpClient, String clientUri) { return new AttestationClientBuilder().pipeline(getHttpPipeline(httpClient)).instanceUrl(clientUri); } /** * Retrieves an HTTP pipeline configured on the specified HTTP pipeline. * * Used by getBuilder(). * @param httpClient - Client on which to configure the HTTP pipeline. * @return an HttpPipeline object configured for the MAA service on the specified http client. 
*/ private HttpPipeline getHttpPipeline(HttpClient httpClient) { TokenCredential credential = null; if (!interceptorManager.isPlaybackMode()) { credential = new EnvironmentCredentialBuilder().httpClient(httpClient).build(); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, DATAPLANE_SCOPE)); } if (getTestMode() == TestMode.RECORD) { policies.add(interceptorManager.getRecordPolicy()); } return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient) .build(); } /** * Verifies an MAA attestation token and returns the set of attestation claims embedded in the token. * @param httpClient - the HTTP client which was used to retrieve the token (used to retrieve the signing certificates for the attestation instance) * @param clientUri - the base URI used to access the attestation instance (used to retrieve the signing certificates for the attestation instance) * @param attestationToken - Json Web Token issued by the Attestation Service. * @return a JWTClaimSet containing the claims associated with the attestation token. 
*/ Mono<JWTClaimsSet> verifyAttestationToken(HttpClient httpClient, String clientUri, String attestationToken) { final SignedJWT token; try { token = SignedJWT.parse(attestationToken); } catch (ParseException e) { return Mono.error(logger.logThrowableAsError(e)); } SignedJWT finalToken = token; return getSigningCertificateByKeyId(token, httpClient, clientUri) .handle((cert, sink) -> { final PublicKey key = cert.getPublicKey(); final RSAPublicKey rsaKey = (RSAPublicKey) key; final RSASSAVerifier verifier = new RSASSAVerifier(rsaKey); try { assertTrue(finalToken.verify(verifier)); } catch (JOSEException e) { sink.error(logger.logThrowableAsError(e)); return; } final JWTClaimsSet claims; try { claims = finalToken.getJWTClaimsSet(); } catch (ParseException e) { sink.error(logger.logThrowableAsError(e)); return; } assertNotNull(claims); sink.next(claims); }); } /** * Create a JWS Signer from the specified PKCS8 encoded signing key. * @param signingKeyBase64 Base64 encoded PKCS8 encoded RSA Private key. * @return JWSSigner created over the specified signing key. */ JWSSigner getJwsSigner(String signingKeyBase64) { byte[] signingKey = Base64.getDecoder().decode(signingKeyBase64); PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(signingKey); KeyFactory keyFactory; try { keyFactory = KeyFactory.getInstance("RSA"); } catch (NoSuchAlgorithmException e) { throw logger.logThrowableAsError(new RuntimeException(e)); } PrivateKey privateKey; try { privateKey = keyFactory.generatePrivate(keySpec); } catch (InvalidKeySpecException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } return new RSASSASigner(privateKey); } /** * Find the signing certificate associated with the specified SignedJWT token. * * This method depends on the token * @param token - MAA generated token on which to find the certificate. * @param client - Http Client used to retrieve signing certificates. * @param clientUri - Base URI for the attestation client. 
* @return X509Certificate which will have been used to sign the token. */ Mono<X509Certificate> getSigningCertificateByKeyId(SignedJWT token, HttpClient client, String clientUri) { AttestationClientBuilder builder = getBuilder(client, clientUri); return builder.buildSigningCertificatesAsyncClient().get() .handle((keySet, sink) -> { final CertificateFactory cf; try { cf = CertificateFactory.getInstance("X.509"); } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } final String keyId = token.getHeader().getKeyID(); boolean foundKey = false; for (JsonWebKey key : keySet.getKeys()) { if (keyId.equals(key.getKid())) { final Certificate cert; try { cert = cf.generateCertificate(base64ToStream(key.getX5C().get(0))); foundKey = true; } catch (CertificateException e) { sink.error(logger.logThrowableAsError(e)); return; } assertTrue(cert instanceof X509Certificate); sink.next((X509Certificate) cert); } } if (!foundKey) { sink.error(logger.logThrowableAsError(new RuntimeException(String.format( "Key %s not found in JSON Web Key Set", keyId)))); } }); } /** * Retrieve the signing certificate used for the isolated attestation instance. * @return Returns a base64 encoded X.509 certificate used to sign policy documents. */ /** * Retrieve the signing key used for the isolated attestation instance. * @return Returns a base64 encoded RSA Key used to sign policy documents. 
*/ String getIsolatedSigningKey() { String signingKey = Configuration.getGlobalConfiguration().get("isolatedSigningKey"); if (signingKey == null) { signingKey = "MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDZlz+tRMz5knbLWYY+CJmgzJ4WIoKAkVs6fwm2" + "JZt3ig8NKWDR9XC0Byixj4cCNOanvqSy2eLLhm30jNdc0o3ObLJVro+4WsI2p19DuV5PrpyCiZHDPb5DmxtMnsXpYV1ePIx" + "veLgNcTe4lu/pRGxaCcDxSWLG1DL4BsMXzLE2GQaCVLzPHI0NJVvd/DDXzbHK7tX45F8kRaXhnSd3fOaS4spw57r9oZfL1f" + "zM03DVptnEmBrpsxP8Kw7aLv5ZYLhX/rK9H7MrM4NA6g/g3dw4w/rf8025hJaAUJ+T68oARiXXBqDWCIkPXhkmukcmmP6Sl" + "8mnNAqRG55iRY4AqzLRAgMBAAECggEAU0dTJMLPXLnU47Fo8rch7WxDGR+uKPz5GKNkmSU9onvhlN0AZHt23kBbL9JKDusm" + "WI9bw+QmrFTQIqgBCVLA2X+6pZaBBUMfUAGxMV9yHDctSbzTYBFyj7d+tE2UW+Va8eVkrolakDKD7A9A1VvNyIwxH2hB+O1" + "gcJNN+f7q2FP4zpmJjEsMm9IL9sZ+6aiQSSsFQEih92yZEtHJ6Ohe8mdvSkmi3Ki0TSeqDfh4CksRnd6Bv/6oBAV48WaRa3" + "yQ7tnsBrhXrCRzXRbiCcJP+C/Eqe3gkXvWuzq+cgicX95qh05VPnf5Pa6w5N4wEgwmoorloYfDStYcthtKidUefQKBgQD3h" + "WXciacPcydjAfH+0WyoszqBup/B5OBw/ZNlv531EzongB8V7+3pCs1/gF4+H3qvIRkL7JWt4HVtZEBp4D3tpWQHoYpE6wxA" + "0oeGM/DXbCQttCpR3eHZXYa9hbuQZuFjkclXjDBIk/q+U178+GRiB7zZb7JGNCBwlpCkTh+WywKBgQDhC2GnDCAGjwjDHa5" + "Nf4qLWyISN34KoEF9hgAYIvNYzAwwp8J/xxxQ7j8hf5XJPnld1UprVrhrYL0aGSc0kNWri1pZx2PDge42XK9boRARvuuK5U" + "aV3VNk7xb7vHzjoNDJWzmLlEaVPLFQPHVWHobTMwQWbzKZmopTA+QuV68NUwKBgQCbMmU/9n9tTIKxrZKSd7VtwZM5rE5nQ" + "J8JubUl4xOjir637bmQA7RknoVjIJX21b4S+Om/dEQVlduLD4Tj3dp2m3Ew57TOqaIxMtAO8ZpdOE0m6wRt+HWX2PCW/Lcy" + "P4+q4sofvqK3nzFlDNlOPGCUps1eeI6LPjvo3D8tBl8AKQKBgQCHhv8sRtUSnhk8yCcsbN7Wxe9i4SB67cADBCwSXRoII/pDY" + "wRzR0n6Q0Cpv9hI9eLJa6YBtpwhroSzruo5ce/7+1RSNQ4Ts6/t9St2Fy1CQqQ/ZYx4vG14n7RLrlvYCgUy/klNkeJgBckS9R" + "YE4yV3E4YmrJjggH1FOVa1wgCeGQKBgQCbCKeM4EahWIyTBiZsTQ/l5hwhjPCrxncbyA2EZWNg3Ri6nuMIQzoBrCoX9L8t7e0" + "CKWAN0oM2Cn1VIJhsiE75dzN3vvGBcNZ9y+BwbwxDIAhrztyKKJS0h9YmAUVr+w5WsUPyMUPQ0/1wdTdxvKqQpriddrvyKRSJ" + "M9fb29+cwQ=="; } return signingKey; } /** * Retrieves a certificate which can be used to sign attestation policies. 
* @return Returns a base64 encoded X.509 certificate which can be used to sign attestation policies. */ String getPolicySigningCertificate0() { String certificate = Configuration.getGlobalConfiguration().get("policySigningCertificate0"); if (certificate == null) { certificate = "MIIC1jCCAb6gAwIBAgIIAxfcH6Co5DowDQYJKoZIhvcNAQELBQAwIjEgMB4GA1UEAxMXQXR0ZXN0YXRpb25DZXJ" + "0aWZpY2F0ZTAwHhcNMjEwMTE5MjAxMjU2WhcNMjIwMTE5MjAxMjU2WjAiMSAwHgYDVQQDExdBdHRlc3RhdGlvbkNlcnRpZmlj" + "YXRlMDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOOpb5GvUCuOYiB4ZazIePtSazdXGDyjtFlr4ulo1VY1Ai91X" + "IcWIPELCV1OfQiIoJlj096u3cirP1GvCKgb4FTNHHi7omDaQvYRmuZZ6KXrqNi5Iu/jKjGgjwYt+FYV/9eqYCWdyS0RjMbKw7" + "sZUvBxTDeTqQunwbjPZ1y4JbxXx6xwcZJHfwD6g7aHslsblHh4zM1mhiuoIMpNUeeThLwQTD6oGSmIt+hqRbfvd3Ljr/v7W3m" + "SKvw5X9L85PNHaDIUd4vHSDiytZUoXyhtbC8RKGzxgZCz6gFwM5JF6QhYE/A84HFH7JZ3FKk1UJBoTjcv63BshT7Pt3fYMZqV" + "SzkCAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAC+YKGp6foV94lYQB/yNQ53Bk+YeR4dgAkR99U" + "t1VvvXKyWnka/X8QRjKoaPULDZH4+9ZXg6lSfhuEGTSPUzO294mlNbF6n6cuuuawe3OeuZUO53b4xYXPRv898FBRxdD+FCW/f" + "A5HLBrGItfk31+aNUFryCd5RrJfJU8Rurm+7uGPtS16Ft0P7xSnL0C7nfHNVuEKFV0ZbzgzXlzkKQT4d3fYpvOxzYoXImxzwz" + "W/jzZjN3aKbOlmY2LyW8J5BKKgA3C4FRWwCTmgqYp2vQhsw1HgCeBjmBN5/imnk2lsgjrvvSdlkXOnNf5atibuguYzdakz99b" + "wwWWsd5HddtcyA=="; } return certificate; } /** * Returns the location in which the tests are running. * @return returns the location in which the tests are running. */ private static String getLocationShortName() { String shortName = Configuration.getGlobalConfiguration().get("locationShortName"); if (shortName == null) { shortName = "wus"; } return shortName; } /** * Returns the url associated with the isolated MAA instance. * @return the url associated with the isolated MAA instance. 
*/ private static String getIsolatedUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_ISOLATED_URL"); if (url == null) { url = "https: } return url; } /** * Returns the url associated with the AAD MAA instance. * @return the url associated with the AAD MAA instance. */ private static String getAadUrl() { String url = Configuration.getGlobalConfiguration().get("ATTESTATION_AAD_URL"); if (url == null) { url = "https: } return url; } /** * Returns the set of clients to be used to test the attestation service. * @return a stream of Argument objects associated with each of the regions on which to run the attestation test. */ static Stream<Arguments> getAttestationClients() { final String regionShortName = getLocationShortName(); return getHttpClients().flatMap(httpClient -> Stream.of( Arguments.of(httpClient, "https: Arguments.of(httpClient, getIsolatedUrl()), Arguments.of(httpClient, getAadUrl()))); } /** * Returns the set of clients and attestation types used for attestation policy APIs. * @return a stream of Argument objects associated with each of the regions on which to run the attestation test. */ static Stream<Arguments> getPolicyClients() { return getAttestationClients().flatMap(clientParams -> Stream.of( Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.OPEN_ENCLAVE), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.TPM), Arguments.of(clientParams.get()[0], clientParams.get()[1], AttestationType.SGX_ENCLAVE))); } }
replace InternalObjectNode with ObjectNode. `InternalObjectNode` is internal type and is going to be removed.
private void validateDataCreation(int expectedSize) { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize, _configuration.getDatabaseId(), containerName); final List<FeedResponse<InternalObjectNode>> queryItemsResponseList = container .queryItems(COUNT_ALL_QUERY, InternalObjectNode.class) .byPage() .collectList() .block(BULK_LOAD_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) .map(list -> list.get(0)) .map(internalObjectNode -> { try { final ObjectNode result = internalObjectNode.getObject(ObjectNode.class); return result.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue(); } catch (IOException ex) { LOGGER.error("Error extracting the result count from the response"); } return 0; }) .orElse(0); if (resultCount != expectedSize) { throw new IllegalStateException("Expected number of records " + expectedSize + " not found in the container " + containerName + ". Actual count: " + resultCount); } }
final List<FeedResponse<InternalObjectNode>> queryItemsResponseList = container
private void validateDataCreation(int expectedSize) { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize, _configuration.getDatabaseId(), containerName); final List<FeedResponse<ObjectNode>> queryItemsResponseList = container .queryItems(COUNT_ALL_QUERY, ObjectNode.class) .byPage() .collectList() .block(BULK_LOAD_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) .map(list -> list.get(0)) .map(objectNode -> objectNode.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue()) .orElse(0); if (resultCount != expectedSize) { throw new IllegalStateException("Expected number of records " + expectedSize + " not found in the container " + containerName + ". Actual count: " + resultCount); } }
class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); private static final int MAX_BATCH_SIZE = 10000; private static final int BULK_OPERATION_CONCURRENCY = 5; private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; private final Configuration _configuration; private final CosmosAsyncClient _client; public DataLoader(final Configuration configuration, final CosmosAsyncClient client) { _configuration = Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); } public void loadData(final Map<Key, ObjectNode> records) { bulkCreateItems(records); validateDataCreation(records.size()); } private void bulkCreateItems(final Map<Key, ObjectNode> records) { final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records); final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(), database.getId(), containerName); final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) .blockLast(BULK_LOAD_WAIT_DURATION); LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName); } /** * Map the generated data to createItem requests in the underlying container * * @param 
records Data we want to load into the container * @return List of CosmosItemOperation, each mapping to a createItem for that record */ private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) { return records.entrySet() .stream() .map(record -> { final String partitionKey = record.getKey().getPartitioningKey(); final InternalObjectNode objectNode = new InternalObjectNode(record.getValue()); return BulkOperations.getCreateItemOperation(objectNode, new PartitionKey(partitionKey)); }) .collect(Collectors.toList()); } }
class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); private static final int MAX_BATCH_SIZE = 10000; private static final int BULK_OPERATION_CONCURRENCY = 5; private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; private final Configuration _configuration; private final CosmosAsyncClient _client; public DataLoader(final Configuration configuration, final CosmosAsyncClient client) { _configuration = Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); } public void loadData(final Map<Key, ObjectNode> records) { bulkCreateItems(records); validateDataCreation(records.size()); } private void bulkCreateItems(final Map<Key, ObjectNode> records) { final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records); final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(), database.getId(), containerName); final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) .blockLast(BULK_LOAD_WAIT_DURATION); LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName); } /** * Map the generated data to createItem requests in the underlying container * * @param 
records Data we want to load into the container * @return List of CosmosItemOperation, each mapping to a createItem for that record */ private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) { return records.entrySet() .stream() .map(record -> { final String partitionKey = record.getKey().getPartitioningKey(); final ObjectNode value = record.getValue(); return BulkOperations.getCreateItemOperation(value, new PartitionKey(partitionKey)); }) .collect(Collectors.toList()); } }
please remove all instances of `InternalObjectNode` this is internal type and is going to be removed.
private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) { return records.entrySet() .stream() .map(record -> { final String partitionKey = record.getKey().getPartitioningKey(); final InternalObjectNode objectNode = new InternalObjectNode(record.getValue()); return BulkOperations.getCreateItemOperation(objectNode, new PartitionKey(partitionKey)); }) .collect(Collectors.toList()); }
final InternalObjectNode objectNode = new InternalObjectNode(record.getValue());
private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) { return records.entrySet() .stream() .map(record -> { final String partitionKey = record.getKey().getPartitioningKey(); final ObjectNode value = record.getValue(); return BulkOperations.getCreateItemOperation(value, new PartitionKey(partitionKey)); }) .collect(Collectors.toList()); }
class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); private static final int MAX_BATCH_SIZE = 10000; private static final int BULK_OPERATION_CONCURRENCY = 5; private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; private final Configuration _configuration; private final CosmosAsyncClient _client; public DataLoader(final Configuration configuration, final CosmosAsyncClient client) { _configuration = Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); } public void loadData(final Map<Key, ObjectNode> records) { bulkCreateItems(records); validateDataCreation(records.size()); } private void bulkCreateItems(final Map<Key, ObjectNode> records) { final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records); final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(), database.getId(), containerName); final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) .blockLast(BULK_LOAD_WAIT_DURATION); LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName); } private void validateDataCreation(int expectedSize) { final String containerName = 
_configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize, _configuration.getDatabaseId(), containerName); final List<FeedResponse<InternalObjectNode>> queryItemsResponseList = container .queryItems(COUNT_ALL_QUERY, InternalObjectNode.class) .byPage() .collectList() .block(BULK_LOAD_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) .map(list -> list.get(0)) .map(internalObjectNode -> { try { final ObjectNode result = internalObjectNode.getObject(ObjectNode.class); return result.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue(); } catch (IOException ex) { LOGGER.error("Error extracting the result count from the response"); } return 0; }) .orElse(0); if (resultCount != expectedSize) { throw new IllegalStateException("Expected number of records " + expectedSize + " not found in the container " + containerName + ". Actual count: " + resultCount); } } /** * Map the generated data to createItem requests in the underlying container * * @param records Data we want to load into the container * @return List of CosmosItemOperation, each mapping to a createItem for that record */ }
class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); private static final int MAX_BATCH_SIZE = 10000; private static final int BULK_OPERATION_CONCURRENCY = 5; private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; private final Configuration _configuration; private final CosmosAsyncClient _client; public DataLoader(final Configuration configuration, final CosmosAsyncClient client) { _configuration = Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); } public void loadData(final Map<Key, ObjectNode> records) { bulkCreateItems(records); validateDataCreation(records.size()); } private void bulkCreateItems(final Map<Key, ObjectNode> records) { final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records); final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(), database.getId(), containerName); final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) .blockLast(BULK_LOAD_WAIT_DURATION); LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName); } private void validateDataCreation(int expectedSize) { final String containerName = 
_configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize, _configuration.getDatabaseId(), containerName); final List<FeedResponse<ObjectNode>> queryItemsResponseList = container .queryItems(COUNT_ALL_QUERY, ObjectNode.class) .byPage() .collectList() .block(BULK_LOAD_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) .map(list -> list.get(0)) .map(objectNode -> objectNode.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue()) .orElse(0); if (resultCount != expectedSize) { throw new IllegalStateException("Expected number of records " + expectedSize + " not found in the container " + containerName + ". Actual count: " + resultCount); } } /** * Map the generated data to createItem requests in the underlying container * * @param records Data we want to load into the container * @return List of CosmosItemOperation, each mapping to a createItem for that record */ }
We should be able to create an application gateway without a PFX certificate (use HTTP instead of HTTPS). Later task.
public void canCRUDApplicationGatewayWithWAF() throws Exception { if (skipInPlayback()) { return; } String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificateFromPfxFile( new File(getClass().getClassLoader().getResource("myTest.pfx").getFile())) .withSslCertificatePassword("Abc123") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertTrue(appGateway != null); Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier())); Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size())); Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2); Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5); ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration(); config.withFileUploadLimitInMb(200); config .withDisabledRuleGroups( Arrays .asList( new ApplicationGatewayFirewallDisabledRuleGroup() .withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"))); config.withRequestBodyCheck(true); config.withMaxRequestBodySizeInKb(64); config .withExclusions( Arrays .asList( new ApplicationGatewayFirewallExclusion() 
.withMatchVariable("RequestHeaderNames") .withSelectorMatchOperator("StartsWith") .withSelector("User-Agent"))); appGateway.update().withWebApplicationFirewall(config).apply(); appGateway.refresh(); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck()); Assertions .assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(), "RequestHeaderNames"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(), "StartsWith"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent"); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(), "REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"); }
return;
public void canCRUDApplicationGatewayWithWAF() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpPort(80) .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertTrue(appGateway != null); Assertions.assertTrue(ApplicationGatewayTier.WAF_V2.equals(appGateway.tier())); Assertions.assertTrue(ApplicationGatewaySkuName.WAF_V2.equals(appGateway.size())); Assertions.assertTrue(appGateway.autoscaleConfiguration().minCapacity() == 2); Assertions.assertTrue(appGateway.autoscaleConfiguration().maxCapacity() == 5); ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration(); config.withFileUploadLimitInMb(200); config .withDisabledRuleGroups( Arrays .asList( new ApplicationGatewayFirewallDisabledRuleGroup() .withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"))); config.withRequestBodyCheck(true); config.withMaxRequestBodySizeInKb(64); config .withExclusions( Arrays .asList( new ApplicationGatewayFirewallExclusion() .withMatchVariable("RequestHeaderNames") .withSelectorMatchOperator("StartsWith") .withSelector("User-Agent"))); appGateway.update().withWebApplicationFirewall(config).apply(); appGateway.refresh(); 
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb() == 200); Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck()); Assertions .assertEquals(appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb(), (Integer) 64); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().exclusions().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable(), "RequestHeaderNames"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator(), "StartsWith"); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector(), "User-Agent"); Assertions.assertEquals(appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size(), 1); Assertions .assertEquals( appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName(), "REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION"); }
class ApplicationGatewayTests extends NetworkManagementTest { @Test @Test @Disabled("Need client id for key vault usage") public void canCreateApplicationGatewayWithSecret() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); String identityName = generateRandomResourceName("id", 10); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); Identity identity = msiManager .identities() .define(identityName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .create(); Assertions.assertNotNull(identity.name()); Assertions.assertNotNull(identity.principalId()); Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificate("ssl1") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withIdentity(serviceIdentity) .defineSslCertificate("ssl1") .withKeyVaultSecretId(secret1.id()) .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId()); Assertions .assertEquals( secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId()); 
appGateway = appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply(); Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId()); } private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception { String vaultName = generateRandomResourceName("vlt", 10); String secretName = generateRandomResourceName("srt", 10); String secretValue = Files .readFirstLine( new File(getClass().getClassLoader().getResource("test.certificate").getFile()), Charset.defaultCharset()); Vault vault = keyVaultManager .vaults() .define(vaultName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipal) .allowSecretAllPermissions() .attach() .defineAccessPolicy() .forObjectId(identityPrincipal) .allowSecretAllPermissions() .attach() .withAccessFromAzureServices() .withDeploymentEnabled() .withSoftDeleteEnabled() .create(); return vault.secrets().define(secretName).withValue(secretValue).create(); } private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception { ObjectMapper mapper = new ObjectMapper(); JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode(); ((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId()); ((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId()); ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue = new JacksonAdapter() .deserialize( mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject), ManagedServiceIdentityUserAssignedIdentities.class, SerializerEncoding.JSON); Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>(); userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue); ManagedServiceIdentity serviceIdentity = new 
ManagedServiceIdentity(); serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED); serviceIdentity.withUserAssignedIdentities(userAssignedIdentities); return serviceIdentity; } }
class ApplicationGatewayTests extends NetworkManagementTest { @Test @Test @Disabled("Need client id for key vault usage") public void canCreateApplicationGatewayWithSecret() throws Exception { String appGatewayName = generateRandomResourceName("agwaf", 15); String appPublicIp = generateRandomResourceName("pip", 15); String identityName = generateRandomResourceName("id", 10); PublicIpAddress pip = networkManager .publicIpAddresses() .define(appPublicIp) .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withSku(PublicIPSkuType.STANDARD) .withStaticIP() .create(); Identity identity = msiManager .identities() .define(identityName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .create(); Assertions.assertNotNull(identity.name()); Assertions.assertNotNull(identity.principalId()); Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId()); ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity); ApplicationGateway appGateway = networkManager .applicationGateways() .define(appGatewayName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineRequestRoutingRule("rule1") .fromPublicFrontend() .fromFrontendHttpsPort(443) .withSslCertificate("ssl1") .toBackendHttpPort(8080) .toBackendIPAddress("11.1.1.1") .toBackendIPAddress("11.1.1.2") .attach() .withIdentity(serviceIdentity) .defineSslCertificate("ssl1") .withKeyVaultSecretId(secret1.id()) .attach() .withExistingPublicIpAddress(pip) .withTier(ApplicationGatewayTier.WAF_V2) .withSize(ApplicationGatewaySkuName.WAF_V2) .withAutoScale(2, 5) .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION) .create(); Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId()); Assertions .assertEquals( secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId()); 
appGateway = appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply(); Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId()); } private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception { String vaultName = generateRandomResourceName("vlt", 10); String secretName = generateRandomResourceName("srt", 10); String secretValue = Files .readFirstLine( new File(getClass().getClassLoader().getResource("test.certificate").getFile()), Charset.defaultCharset()); Vault vault = keyVaultManager .vaults() .define(vaultName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(servicePrincipal) .allowSecretAllPermissions() .attach() .defineAccessPolicy() .forObjectId(identityPrincipal) .allowSecretAllPermissions() .attach() .withAccessFromAzureServices() .withDeploymentEnabled() .withSoftDeleteEnabled() .create(); return vault.secrets().define(secretName).withValue(secretValue).create(); } private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception { ObjectMapper mapper = new ObjectMapper(); JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode(); ((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId()); ((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId()); ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue = new JacksonAdapter() .deserialize( mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject), ManagedServiceIdentityUserAssignedIdentities.class, SerializerEncoding.JSON); Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>(); userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue); ManagedServiceIdentity serviceIdentity = new 
ManagedServiceIdentity(); serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED); serviceIdentity.withUserAssignedIdentities(userAssignedIdentities); return serviceIdentity; } }
This will fail the whole workload if we hit an error during data creation. Do we want to add some fault tolerance or retries?
private void validateDataCreation(int expectedSize) { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize, _configuration.getDatabaseId(), containerName); final List<FeedResponse<InternalObjectNode>> queryItemsResponseList = container .queryItems(COUNT_ALL_QUERY, InternalObjectNode.class) .byPage() .collectList() .block(BULK_LOAD_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) .map(list -> list.get(0)) .map(internalObjectNode -> { try { final ObjectNode result = internalObjectNode.getObject(ObjectNode.class); return result.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue(); } catch (IOException ex) { LOGGER.error("Error extracting the result count from the response"); } return 0; }) .orElse(0); if (resultCount != expectedSize) { throw new IllegalStateException("Expected number of records " + expectedSize + " not found in the container " + containerName + ". Actual count: " + resultCount); } }
final ObjectNode result = internalObjectNode.getObject(ObjectNode.class);
private void validateDataCreation(int expectedSize) { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize, _configuration.getDatabaseId(), containerName); final List<FeedResponse<ObjectNode>> queryItemsResponseList = container .queryItems(COUNT_ALL_QUERY, ObjectNode.class) .byPage() .collectList() .block(BULK_LOAD_WAIT_DURATION); final int resultCount = Optional.ofNullable(queryItemsResponseList) .map(responseList -> responseList.get(0)) .map(FeedResponse::getResults) .map(list -> list.get(0)) .map(objectNode -> objectNode.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue()) .orElse(0); if (resultCount != expectedSize) { throw new IllegalStateException("Expected number of records " + expectedSize + " not found in the container " + containerName + ". Actual count: " + resultCount); } }
class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); private static final int MAX_BATCH_SIZE = 10000; private static final int BULK_OPERATION_CONCURRENCY = 5; private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; private final Configuration _configuration; private final CosmosAsyncClient _client; public DataLoader(final Configuration configuration, final CosmosAsyncClient client) { _configuration = Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); } public void loadData(final Map<Key, ObjectNode> records) { bulkCreateItems(records); validateDataCreation(records.size()); } private void bulkCreateItems(final Map<Key, ObjectNode> records) { final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records); final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(), database.getId(), containerName); final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) .blockLast(BULK_LOAD_WAIT_DURATION); LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName); } /** * Map the generated data to createItem requests in the underlying container * * @param 
records Data we want to load into the container * @return List of CosmosItemOperation, each mapping to a createItem for that record */ private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) { return records.entrySet() .stream() .map(record -> { final String partitionKey = record.getKey().getPartitioningKey(); final InternalObjectNode objectNode = new InternalObjectNode(record.getValue()); return BulkOperations.getCreateItemOperation(objectNode, new PartitionKey(partitionKey)); }) .collect(Collectors.toList()); } }
class DataLoader { private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class); private static final int MAX_BATCH_SIZE = 10000; private static final int BULK_OPERATION_CONCURRENCY = 5; private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60); private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; private final Configuration _configuration; private final CosmosAsyncClient _client; public DataLoader(final Configuration configuration, final CosmosAsyncClient client) { _configuration = Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); _client = Preconditions.checkNotNull(client, "The CosmosAsyncClient needed for data loading can not be null"); } public void loadData(final Map<Key, ObjectNode> records) { bulkCreateItems(records); validateDataCreation(records.size()); } private void bulkCreateItems(final Map<Key, ObjectNode> records) { final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records); final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(), database.getId(), containerName); final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE) .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY); container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions) .blockLast(BULK_LOAD_WAIT_DURATION); LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName); } /** * Map the generated data to createItem requests in the underlying container * * @param 
records Data we want to load into the container * @return List of CosmosItemOperation, each mapping to a createItem for that record */ private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) { return records.entrySet() .stream() .map(record -> { final String partitionKey = record.getKey().getPartitioningKey(); final ObjectNode value = record.getValue(); return BulkOperations.getCreateItemOperation(value, new PartitionKey(partitionKey)); }) .collect(Collectors.toList()); } }
If we are deleting the whole database, we can avoid this code — your choice.
public void deleteResources() { deleteExistingContainers(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Deleting the main database {} used in this test", _configuration.getDatabaseId()); database.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while deleting the database {}", _configuration.getDatabaseId(), e); throw e; } LOGGER.info("Resource cleanup completed"); }
deleteExistingContainers();
public void deleteResources() { deleteExistingContainers(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); try { LOGGER.info("Deleting the main database {} used in this test", _configuration.getDatabaseId()); database.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while deleting the database {}", _configuration.getDatabaseId(), e); throw e; } LOGGER.info("Resource cleanup completed"); }
class ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final CosmosAsyncClient _client; public ResourceManager(final Configuration configuration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " + "setting up the Database and containers for the test"); _configuration = configuration; _client = client; } /** * Initialize the CosmosDB database required for running this test, or if the database exists, delete all * legacy containers * * @throws CosmosException in the event of an error creating the underlying database, or deleting * containers from a previously created database of the same name */ public void initializeDatabase() throws CosmosException { try { LOGGER.info("Creating database {} for the ctl workload", _configuration.getDatabaseId()); _client.createDatabaseIfNotExists(_configuration.getDatabaseId()) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); throw e; } deleteExistingContainers(); } /** * Create desired container/collection for the test * * @throws CosmosException if the container could not be created */ public void createContainer() throws CosmosException { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final ThroughputProperties containerThroughput = createManualThroughput(_configuration.getThroughput()); try { LOGGER.info("Creating container {} in the database {}", containerName, database.getId()); final CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, 
PARTITION_KEY_PATH); database.createContainerIfNotExists(containerProperties, containerThroughput) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating container {}", containerName, e); throw e; } } /** * Delete all resources i.e. databases and containers created as part of this test */ private void deleteExistingContainers() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List<CosmosAsyncContainer> cosmosAsyncContainers = database.readAllContainers() .byPage() .toStream() .flatMap(cosmosContainerPropertiesFeedResponse -> cosmosContainerPropertiesFeedResponse.getResults().stream()) .map(cosmosContainerProperties -> database.getContainer(cosmosContainerProperties.getId())) .collect(Collectors.toList()); for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Error deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } } }
class ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final CosmosAsyncClient _client; public ResourceManager(final Configuration configuration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for " + "setting up the Database and containers for the test"); _configuration = configuration; _client = client; } /** * Initialize the CosmosDB database required for running this test, or if the database exists, delete all * legacy containers * * @throws CosmosException in the event of an error creating the underlying database, or deleting * containers from a previously created database of the same name */ public void initializeDatabase() throws CosmosException { try { LOGGER.info("Creating database {} for the ctl workload", _configuration.getDatabaseId()); _client.createDatabaseIfNotExists(_configuration.getDatabaseId()) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating database {}", _configuration.getDatabaseId(), e); throw e; } deleteExistingContainers(); } /** * Create desired container/collection for the test * * @throws CosmosException if the container could not be created */ public void createContainer() throws CosmosException { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final ThroughputProperties containerThroughput = createManualThroughput(_configuration.getThroughput()); try { LOGGER.info("Creating container {} in the database {}", containerName, database.getId()); final CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerName, 
PARTITION_KEY_PATH); database.createContainerIfNotExists(containerProperties, containerThroughput) .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Exception while creating container {}", containerName, e); throw e; } } /** * Delete all resources i.e. databases and containers created as part of this test */ private void deleteExistingContainers() { final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final List<CosmosAsyncContainer> cosmosAsyncContainers = database.readAllContainers() .byPage() .toStream() .flatMap(cosmosContainerPropertiesFeedResponse -> cosmosContainerPropertiesFeedResponse.getResults().stream()) .map(cosmosContainerProperties -> database.getContainer(cosmosContainerProperties.getId())) .collect(Collectors.toList()); for (CosmosAsyncContainer cosmosAsyncContainer : cosmosAsyncContainers) { LOGGER.info("Deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId()); try { cosmosAsyncContainer.delete() .block(RESOURCE_CRUD_WAIT_TIME); } catch (CosmosException e) { LOGGER.error("Error deleting container {} in the Database {}", cosmosAsyncContainer.getId(), _configuration.getDatabaseId(), e); } } } }
Can we use org.assertj.core.api's assertThat? We have been using it in our SDK.
public void testGenerate() {
    // Ask the generator for RECORD_COUNT records and verify none were dropped
    // or collapsed onto duplicate keys (the Map size equals the requested count).
    final InvitationDataGenerator generator = new InvitationDataGenerator();
    final Map<Key, ObjectNode> generated = generator.generate(RECORD_COUNT);
    assertEquals(generated.size(), RECORD_COUNT);
}
assertEquals(results.size(), RECORD_COUNT);
public void testGenerate() {
    // Generate RECORD_COUNT invitation records and assert the generator produced
    // exactly that many unique keys (the Map size equals the requested count).
    final InvitationDataGenerator invitationDataGenerator = new InvitationDataGenerator();
    final Map<Key, ObjectNode> results = invitationDataGenerator.generate(RECORD_COUNT);
    assertThat(results.size()).isEqualTo(RECORD_COUNT);
}
class TestInvitationDataGenerator { private static final int RECORD_COUNT = 10000; @Test }
class TestInvitationDataGenerator { private static final int RECORD_COUNT = 10000; @Test(groups="unit") }
Good point. The reason I have this as an exact match is that the following code assumes all the data in the Map is in the DB (it uses the keys in the Map to randomly select the Keys). Let's keep it for now, and I am open to changing it to, say, a 95% tolerance if the record counts mismatch.
/**
 * Validates the bulk load by running a COUNT(1) query against the container and
 * comparing the stored document count with the expected record count.
 *
 * @param expectedSize number of documents that should now exist in the container
 * @throws IllegalStateException if the counted documents do not match {@code expectedSize}
 */
private void validateDataCreation(int expectedSize) {
    final String containerName = _configuration.getCollectionId();
    final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId());
    final CosmosAsyncContainer container = database.getContainer(containerName);
    LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize,
        _configuration.getDatabaseId(), containerName);
    final List<FeedResponse<InternalObjectNode>> queryItemsResponseList = container
        .queryItems(COUNT_ALL_QUERY, InternalObjectNode.class)
        .byPage()
        .collectList()
        .block(BULK_LOAD_WAIT_DURATION);
    // Fix: use findFirst() instead of get(0) so an empty response list / empty page degrades
    // to a count of 0 (and the mismatch error below) rather than IndexOutOfBoundsException.
    final int resultCount = Optional.ofNullable(queryItemsResponseList)
        .flatMap(responseList -> responseList.stream().findFirst())
        .map(FeedResponse::getResults)
        .flatMap(results -> results.stream().findFirst())
        .map(internalObjectNode -> {
            try {
                // COUNT(1) is projected as the "$1" field of the single result document.
                final ObjectNode result = internalObjectNode.getObject(ObjectNode.class);
                return result.get(COUNT_ALL_QUERY_RESULT_FIELD).intValue();
            } catch (IOException ex) {
                // Fix: log the exception itself, not just the message line.
                LOGGER.error("Error extracting the result count from the response", ex);
            }
            // Treat an unparseable response as zero documents found.
            return 0;
        })
        .orElse(0);
    if (resultCount != expectedSize) {
        throw new IllegalStateException("Expected number of records " + expectedSize
            + " not found in the container " + containerName + ". Actual count: " + resultCount);
    }
}
final ObjectNode result = internalObjectNode.getObject(ObjectNode.class);
/**
 * Validates the bulk load by running a COUNT(1) query against the container and
 * comparing the stored document count with the expected record count.
 *
 * @param expectedSize number of documents that should now exist in the container
 * @throws IllegalStateException if the counted documents do not match {@code expectedSize}
 */
private void validateDataCreation(int expectedSize) {
    final String containerName = _configuration.getCollectionId();
    final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId());
    final CosmosAsyncContainer container = database.getContainer(containerName);
    LOGGER.info("Validating {} documents were loaded into [{}:{}]", expectedSize,
        _configuration.getDatabaseId(), containerName);
    final List<FeedResponse<ObjectNode>> queryItemsResponseList = container
        .queryItems(COUNT_ALL_QUERY, ObjectNode.class)
        .byPage()
        .collectList()
        .block(BULK_LOAD_WAIT_DURATION);
    // Fix: use findFirst() instead of get(0) so an empty response list / empty page degrades
    // to a count of 0 (and the mismatch error below) rather than IndexOutOfBoundsException.
    // Splitting the field lookup into its own map() step also guards against a missing "$1"
    // field: Optional.map() turns a null lookup into an empty Optional instead of an NPE.
    final int resultCount = Optional.ofNullable(queryItemsResponseList)
        .flatMap(responseList -> responseList.stream().findFirst())
        .map(FeedResponse::getResults)
        .flatMap(results -> results.stream().findFirst())
        .map(objectNode -> objectNode.get(COUNT_ALL_QUERY_RESULT_FIELD))
        .map(countNode -> countNode.intValue())
        .orElse(0);
    if (resultCount != expectedSize) {
        throw new IllegalStateException("Expected number of records " + expectedSize
            + " not found in the container " + containerName + ". Actual count: " + resultCount);
    }
}
// Bulk-loads generated test records into the workload's Cosmos container and then
// verifies, by document count, that the load succeeded.
class DataLoader {
    private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class);
    // Bulk execution tuning: items per micro-batch and concurrent micro-batches.
    private static final int MAX_BATCH_SIZE = 10000;
    private static final int BULK_OPERATION_CONCURRENCY = 5;
    // Upper bound for the blocking bulk-load and validation calls.
    private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60);
    private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c";
    // Cosmos names the unaliased COUNT(1) projection "$1" in the result document.
    private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1";
    private final Configuration _configuration;
    private final CosmosAsyncClient _client;

    public DataLoader(final Configuration configuration, final CosmosAsyncClient client) {
        _configuration = Preconditions.checkNotNull(configuration,
            "The Workload configuration defining the parameters can not be null");
        _client = Preconditions.checkNotNull(client,
            "The CosmosAsyncClient needed for data loading can not be null");
    }

    /**
     * Bulk-create the given records in the container, then validate that the stored
     * document count matches the number of records supplied.
     *
     * @param records the generated key -> document map to load
     */
    public void loadData(final Map<Key, ObjectNode> records) {
        bulkCreateItems(records);
        validateDataCreation(records.size());
    }

    // Executes the createItem operations through the SDK's bulk API and blocks until
    // the whole stream completes (or BULK_LOAD_WAIT_DURATION elapses).
    private void bulkCreateItems(final Map<Key, ObjectNode> records) {
        final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records);
        final String containerName = _configuration.getCollectionId();
        final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId());
        final CosmosAsyncContainer container = database.getContainer(containerName);
        LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(),
            database.getId(), containerName);
        final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class);
        bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE)
            .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY);
        container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions)
            .blockLast(BULK_LOAD_WAIT_DURATION);
        LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName);
    }

    /**
     * Map the generated data to createItem requests in the underlying container
     *
     * @param records Data we want to load into the container
     * @return List of CosmosItemOperation, each mapping to a createItem for that record
     */
    private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) {
        return records.entrySet()
            .stream()
            .map(record -> {
                final String partitionKey = record.getKey().getPartitioningKey();
                // Wrap the raw JSON in InternalObjectNode for the bulk-operation API.
                final InternalObjectNode objectNode = new InternalObjectNode(record.getValue());
                return BulkOperations.getCreateItemOperation(objectNode, new PartitionKey(partitionKey));
            })
            .collect(Collectors.toList());
    }
}
// Bulk-loads generated test records into the workload's Cosmos container and then
// verifies, by document count, that the load succeeded.
class DataLoader {
    private static final Logger LOGGER = LoggerFactory.getLogger(DataLoader.class);
    // Bulk execution tuning: items per micro-batch and concurrent micro-batches.
    private static final int MAX_BATCH_SIZE = 10000;
    private static final int BULK_OPERATION_CONCURRENCY = 5;
    // Upper bound for the blocking bulk-load and validation calls.
    private static final Duration BULK_LOAD_WAIT_DURATION = Duration.ofSeconds(60);
    private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c";
    // Cosmos names the unaliased COUNT(1) projection "$1" in the result document.
    private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1";
    private final Configuration _configuration;
    private final CosmosAsyncClient _client;

    public DataLoader(final Configuration configuration, final CosmosAsyncClient client) {
        _configuration = Preconditions.checkNotNull(configuration,
            "The Workload configuration defining the parameters can not be null");
        _client = Preconditions.checkNotNull(client,
            "The CosmosAsyncClient needed for data loading can not be null");
    }

    /**
     * Bulk-create the given records in the container, then validate that the stored
     * document count matches the number of records supplied.
     *
     * @param records the generated key -> document map to load
     */
    public void loadData(final Map<Key, ObjectNode> records) {
        bulkCreateItems(records);
        validateDataCreation(records.size());
    }

    // Executes the createItem operations through the SDK's bulk API and blocks until
    // the whole stream completes (or BULK_LOAD_WAIT_DURATION elapses).
    private void bulkCreateItems(final Map<Key, ObjectNode> records) {
        final List<CosmosItemOperation> cosmosItemOperations = mapToCosmosItemOperation(records);
        final String containerName = _configuration.getCollectionId();
        final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId());
        final CosmosAsyncContainer container = database.getContainer(containerName);
        LOGGER.info("Bulk loading {} documents in [{}:{}]", cosmosItemOperations.size(),
            database.getId(), containerName);
        final BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class);
        bulkProcessingOptions.setMaxMicroBatchSize(MAX_BATCH_SIZE)
            .setMaxMicroBatchConcurrency(BULK_OPERATION_CONCURRENCY);
        container.processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions)
            .blockLast(BULK_LOAD_WAIT_DURATION);
        LOGGER.info("Completed document loading into [{}:{}]", database.getId(), containerName);
    }

    /**
     * Map the generated data to createItem requests in the underlying container
     *
     * @param records Data we want to load into the container
     * @return List of CosmosItemOperation, each mapping to a createItem for that record
     */
    private List<CosmosItemOperation> mapToCosmosItemOperation(final Map<Key, ObjectNode> records) {
        return records.entrySet()
            .stream()
            .map(record -> {
                final String partitionKey = record.getKey().getPartitioningKey();
                // The raw ObjectNode is handed straight to the bulk-operation API.
                final ObjectNode value = record.getValue();
                return BulkOperations.getCreateItemOperation(value, new PartitionKey(partitionKey));
            })
            .collect(Collectors.toList());
    }
}
code style?
public ModelAndView index() { ModelAndView model = new ModelAndView("index"); model.addObject("aad_clientId", aadAuthenticationProperties.getClientId()); model.addObject("aad_tenantId", aadAuthenticationProperties.getTenantId()); model.addObject("aad_redirectUri", Optional .ofNullable(aadAuthenticationProperties.getRedirectUriTemplate()) .orElse("http: return model; }
model.addObject("aad_redirectUri", Optional
public ModelAndView index() { ModelAndView model = new ModelAndView("index"); model.addObject("aad_clientId", aadAuthenticationProperties.getClientId()); model.addObject("aad_tenantId", aadAuthenticationProperties.getTenantId()); model.addObject("aad_redirectUri", Optional .ofNullable(aadAuthenticationProperties.getRedirectUriTemplate()) .orElse("http: return model; }
class TodoListController { @Autowired private AADAuthenticationProperties aadAuthenticationProperties; private final List<TodoItem> todoList = new ArrayList<>(); public TodoListController() { todoList.add(0, new TodoItem(2398, "anything", "whoever")); } @RequestMapping("/home") public Map<String, Object> home() { final Map<String, Object> model = new HashMap<>(); model.put("id", UUID.randomUUID().toString()); model.put("content", "home"); return model; } @RequestMapping({"/"}) /** * HTTP GET */ @RequestMapping(value = "/api/todolist/{index}", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<?> getTodoItem(@PathVariable("index") int index) { if (index > todoList.size() - 1) { return new ResponseEntity<>(new TodoItem(-1, "index out of range", null), HttpStatus.NOT_FOUND); } return new ResponseEntity<>(todoList.get(index), HttpStatus.OK); } /** * HTTP GET ALL */ @RequestMapping(value = "/api/todolist", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<List<TodoItem>> getAllTodoItems() { return new ResponseEntity<>(todoList, HttpStatus.OK); } @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> addNewTodoItem(@RequestBody TodoItem item) { item.setID(todoList.size() + 1); todoList.add(todoList.size(), item); return new ResponseEntity<>("Entity created", HttpStatus.CREATED); } /** * HTTP PUT */ @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.PUT, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> updateTodoItem(@RequestBody TodoItem item) { final List<TodoItem> find = todoList.stream().filter(i -> i.getID() == item.getID()).collect(Collectors.toList()); if (!find.isEmpty()) { todoList.set(todoList.indexOf(find.get(0)), item); return new ResponseEntity<>("Entity is 
updated", HttpStatus.OK); } return new ResponseEntity<>("Entity not found", HttpStatus.OK); } /** * HTTP DELETE */ @RequestMapping(value = "/api/todolist/{id}", method = RequestMethod.DELETE) public ResponseEntity<String> deleteTodoItem(@PathVariable("id") int id, PreAuthenticatedAuthenticationToken authToken) { final UserPrincipal current = (UserPrincipal) authToken.getPrincipal(); Membership membership = new Membership( "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", Membership.OBJECT_TYPE_GROUP, "group1"); if (current.isMemberOf(aadAuthenticationProperties, membership.getDisplayName())) { return todoList.stream() .filter(i -> i.getID() == id) .findFirst() .map(item -> { todoList.remove(item); return new ResponseEntity<>("OK", HttpStatus.OK); }) .orElseGet(() -> new ResponseEntity<>("Entity not found", HttpStatus.OK)); } else { return new ResponseEntity<>("Access is denied", HttpStatus.OK); } } }
class TodoListController { @Autowired private AADAuthenticationProperties aadAuthenticationProperties; private final List<TodoItem> todoList = new ArrayList<>(); public TodoListController() { todoList.add(0, new TodoItem(2398, "anything", "whoever")); } @RequestMapping("/home") public Map<String, Object> home() { final Map<String, Object> model = new HashMap<>(); model.put("id", UUID.randomUUID().toString()); model.put("content", "home"); return model; } @RequestMapping({"/"}) /** * HTTP GET */ @RequestMapping(value = "/api/todolist/{index}", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<?> getTodoItem(@PathVariable("index") int index) { if (index > todoList.size() - 1) { return new ResponseEntity<>(new TodoItem(-1, "index out of range", null), HttpStatus.NOT_FOUND); } return new ResponseEntity<>(todoList.get(index), HttpStatus.OK); } /** * HTTP GET ALL */ @RequestMapping(value = "/api/todolist", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<List<TodoItem>> getAllTodoItems() { return new ResponseEntity<>(todoList, HttpStatus.OK); } @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> addNewTodoItem(@RequestBody TodoItem item) { item.setID(todoList.size() + 1); todoList.add(todoList.size(), item); return new ResponseEntity<>("Entity created", HttpStatus.CREATED); } /** * HTTP PUT */ @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.PUT, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> updateTodoItem(@RequestBody TodoItem item) { final List<TodoItem> find = todoList.stream().filter(i -> i.getID() == item.getID()).collect(Collectors.toList()); if (!find.isEmpty()) { todoList.set(todoList.indexOf(find.get(0)), item); return new ResponseEntity<>("Entity is 
updated", HttpStatus.OK); } return new ResponseEntity<>("Entity not found", HttpStatus.OK); } /** * HTTP DELETE */ @RequestMapping(value = "/api/todolist/{id}", method = RequestMethod.DELETE) public ResponseEntity<String> deleteTodoItem(@PathVariable("id") int id, PreAuthenticatedAuthenticationToken authToken) { final UserPrincipal current = (UserPrincipal) authToken.getPrincipal(); Membership membership = new Membership( "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", Membership.OBJECT_TYPE_GROUP, "group1"); if (current.isMemberOf(aadAuthenticationProperties, membership.getDisplayName())) { return todoList.stream() .filter(i -> i.getID() == id) .findFirst() .map(item -> { todoList.remove(item); return new ResponseEntity<>("OK", HttpStatus.OK); }) .orElseGet(() -> new ResponseEntity<>("Entity not found", HttpStatus.OK)); } else { return new ResponseEntity<>("Access is denied", HttpStatus.OK); } } }
Here throughput (RPS/QPS) will not be equal to concurrency. We are using a number of threads equal to the concurrency, so if we choose 50 threads, our throughput will be much higher. No issue here, but just mentioning this as per our offline conversation, so we are aware of our load.
/**
 * Drives the benchmark: repeatedly submits operations for randomly ordered keys until
 * the configured budget is exhausted, then drains the executor and logs the totals.
 *
 * @param testData the key -> document map that was previously loaded into the container
 */
public void run(Map<Key, ObjectNode> testData) {
    // Shuffle so successive iterations do not hit keys in insertion order.
    final ArrayList<Key> keys = new ArrayList<>(testData.keySet());
    Collections.shuffle(keys);
    final long runStartTime = System.currentTimeMillis();
    final AtomicLong successCount = new AtomicLong(0);
    final AtomicLong errorCount = new AtomicLong(0);
    long i = 0;
    for (; BenchmarkHelper.shouldContinue(runStartTime, i, _configuration); i++) {
        // Wrap around the key list; keys.size() is an int, so the modulo always fits an int.
        final int index = (int) (i % keys.size());
        final Key key = keys.get(index);
        _executorService.submit(() -> runOperation(key, testData, successCount, errorCount));
    }
    // Fix: shutdown() must precede awaitTermination(); without it the executor never
    // reaches the terminated state, so the old code always blocked the full 60 seconds
    // and could log totals while tasks were still running.
    _executorService.shutdown();
    try {
        if (!_executorService.awaitTermination(60, TimeUnit.SECONDS)) {
            LOGGER.warn("Submitted operations did not complete within the 60 second drain window");
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        LOGGER.error("Error awaiting the completion of all tasks", e);
    }
    LOGGER.info("Number of iterations: {}, Errors: {}, Runtime: {} millis",
        successCount.get(), errorCount.get(), System.currentTimeMillis() - runStartTime);
}
_executorService.submit(() -> runOperation(key, testData, successCount, errorCount));
/**
 * Drives the benchmark: repeatedly submits operations for randomly ordered keys until
 * the configured budget is exhausted, then drains the executor and logs the totals.
 *
 * @param testData the key -> document map that was previously loaded into the container
 */
public void run(Map<Key, ObjectNode> testData) {
    // Shuffle so successive iterations do not hit keys in insertion order.
    final ArrayList<Key> keys = new ArrayList<>(testData.keySet());
    Collections.shuffle(keys);
    final long runStartTime = System.currentTimeMillis();
    final AtomicLong successCount = new AtomicLong(0);
    final AtomicLong errorCount = new AtomicLong(0);
    long i = 0;
    for (; BenchmarkHelper.shouldContinue(runStartTime, i, _configuration); i++) {
        // Wrap around the key list; keys.size() is an int, so the modulo always fits an int.
        final int index = (int) (i % keys.size());
        final Key key = keys.get(index);
        _executorService.submit(() -> runOperation(key, testData, successCount, errorCount));
    }
    // Fix: shutdown() must precede awaitTermination(); without it the executor never
    // reaches the terminated state, so the old code always blocked the full 60 seconds
    // and could log totals while tasks were still running.
    _executorService.shutdown();
    try {
        if (!_executorService.awaitTermination(60, TimeUnit.SECONDS)) {
            LOGGER.warn("Submitted operations did not complete within the 60 second drain window");
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        LOGGER.error("Error awaiting the completion of all tasks", e);
    }
    LOGGER.info("Number of iterations: {}, Errors: {}, Runtime: {} millis",
        successCount.get(), errorCount.get(), System.currentTimeMillis() - runStartTime);
}
// Runs the GET benchmark: wires up a CosmosDB-backed Accessor and executes keyed
// point reads on a fixed-size thread pool (pool size == configured concurrency).
class GetTestRunner {
    private static final Logger LOGGER = LoggerFactory.getLogger(GetTestRunner.class);
    // Compile-time switch: when true, each read is compared field-by-field with the test data.
    private static final boolean VALIDATE_RESULTS = false;
    private final Configuration _configuration;
    private final Accessor<Key, JsonNode> _accessor;
    private final ExecutorService _executorService;

    GetTestRunner(final Configuration configuration, final CosmosAsyncClient client,
        final MetricRegistry metricsRegistry) {
        Preconditions.checkNotNull(configuration,
            "The Workload configuration defining the parameters can not be null");
        Preconditions.checkNotNull(client,
            "Need a non-null client for setting up the Database and containers for the test");
        Preconditions.checkNotNull(metricsRegistry, "The MetricsRegistry can not be null");
        _configuration = configuration;
        _accessor = createAccessor(configuration, client, metricsRegistry);
        _executorService = Executors.newFixedThreadPool(_configuration.getConcurrency());
    }

    // Executes a single GET for the given key, bumping the success/error counters.
    private void runOperation(final Key key, final Map<Key, ObjectNode> testData,
        final AtomicLong successCount, final AtomicLong errorCount) {
        try {
            final Result<Key, JsonNode> result = _accessor.get(key, GetRequestOptions.EMPTY_REQUEST_OPTIONS);
            if (VALIDATE_RESULTS && !expectedResponse(testData.get(key), result)) {
                LOGGER.info("Result mismatch for Key {}; Actual value: {}, Expected: {}",
                    key, result.getResult(), testData.get(key));
                errorCount.getAndIncrement();
            }
            // NOTE(review): a validation mismatch currently counts as BOTH an error and a
            // success, since this increment is unconditional — confirm that is intended.
            successCount.getAndIncrement();
        } catch (AccessorException e) {
            errorCount.getAndIncrement();
        }
    }

    // Field-by-field comparison: every field of the expected document must be present and
    // equal in the actual result (extra fields in the actual value are ignored).
    private boolean expectedResponse(final ObjectNode expectedResult, final Result<Key, JsonNode> result) {
        final JsonNode actualValue = result.getResult()
            .map(Entity::get)
            .orElse(null);
        // NOTE(review): if the result is absent, actualValue is null and the access below
        // throws NullPointerException — confirm absent results cannot reach this path.
        final Iterator<String> fieldNames = expectedResult.fieldNames();
        while (fieldNames.hasNext()) {
            final String field = fieldNames.next();
            final JsonNode expectedJsonNode = expectedResult.get(field);
            final JsonNode actualJsonNode = actualValue.get(field);
            if (!expectedJsonNode.equals(actualJsonNode)) {
                LOGGER.info("mismatch Actual value: {}, Expected: {}", actualJsonNode, expectedJsonNode);
                return false;
            }
        }
        return true;
    }

    // Builds the CosmosDB data accessor used by the benchmark (metrics + slow-op logging).
    private Accessor<Key, JsonNode> createAccessor(final Configuration configuration,
        final CosmosAsyncClient client, final MetricRegistry metricsRegistry) {
        final StaticDataLocator dataLocator = createDataLocator(configuration, client);
        final KeyExtractor<Key> keyExtractor = new KeyExtractorImpl();
        final DocumentTransformer<JsonNode, JsonNode> documentTransformer = new IdentityDocumentTransformer<>();
        final Clock clock = Clock.systemUTC();
        return new CosmosDBDataAccessor<>(dataLocator, keyExtractor,
            new ResponseHandler<>(documentTransformer, keyExtractor),
            new MetricsFactory(metricsRegistry, clock), clock,
            new OperationsLogger(Duration.ofSeconds(10)));
    }

    // Locator pinned to the single configured collection (no dynamic routing).
    private StaticDataLocator createDataLocator(Configuration configuration, CosmosAsyncClient client) {
        final CollectionKey collectionKey = new CollectionKey(configuration.getServiceEndpoint(),
            configuration.getDatabaseId(), configuration.getCollectionId());
        final CosmosAsyncDatabase database = client.getDatabase(configuration.getDatabaseId());
        final CosmosAsyncContainer container = database.getContainer(configuration.getCollectionId());
        return new StaticDataLocator(collectionKey, container);
    }
}
class GetTestRunner { private static final Logger LOGGER = LoggerFactory.getLogger(GetTestRunner.class); private static final boolean VALIDATE_RESULTS = false; private final Configuration _configuration; private final Accessor<Key, JsonNode> _accessor; private final ExecutorService _executorService; GetTestRunner(final Configuration configuration, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for setting up the Database and containers for the test"); Preconditions.checkNotNull(metricsRegistry, "The MetricsRegistry can not be null"); _configuration = configuration; _accessor = createAccessor(configuration, client, metricsRegistry); _executorService = Executors.newFixedThreadPool(_configuration.getConcurrency()); } private void runOperation(final Key key, final Map<Key, ObjectNode> testData, final AtomicLong successCount, final AtomicLong errorCount) { try { final Result<Key, JsonNode> result = _accessor.get(key, GetRequestOptions.EMPTY_REQUEST_OPTIONS); if (VALIDATE_RESULTS && !expectedResponse(testData.get(key), result)) { LOGGER.info("Result mismatch for Key {}; Actual value: {}, Expected: {}", key, result.getResult(), testData.get(key)); errorCount.getAndIncrement(); } successCount.getAndIncrement(); } catch (AccessorException e) { errorCount.getAndIncrement(); } } private boolean expectedResponse(final ObjectNode expectedResult, final Result<Key, JsonNode> result) { final JsonNode actualValue = result.getResult() .map(Entity::get) .orElse(null); final Iterator<String> fieldNames = expectedResult.fieldNames(); while (fieldNames.hasNext()) { final String field = fieldNames.next(); final JsonNode expectedJsonNode = expectedResult.get(field); final JsonNode actualJsonNode = actualValue.get(field); if (!expectedJsonNode.equals(actualJsonNode)) { LOGGER.info("mismatch 
Actual value: {}, Expected: {}", actualJsonNode, expectedJsonNode); return false; } } return true; } private Accessor<Key, JsonNode> createAccessor(final Configuration configuration, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { final StaticDataLocator dataLocator = createDataLocator(configuration, client); final KeyExtractor<Key> keyExtractor = new KeyExtractorImpl(); final DocumentTransformer<JsonNode, JsonNode> documentTransformer = new IdentityDocumentTransformer<>(); final Clock clock = Clock.systemUTC(); return new CosmosDBDataAccessor<>(dataLocator, keyExtractor, new ResponseHandler<>(documentTransformer, keyExtractor), new MetricsFactory(metricsRegistry, clock), clock, new OperationsLogger(Duration.ofSeconds(10))); } private StaticDataLocator createDataLocator(Configuration configuration, CosmosAsyncClient client) { final CollectionKey collectionKey = new CollectionKey(configuration.getServiceEndpoint(), configuration.getDatabaseId(), configuration.getCollectionId()); final CosmosAsyncDatabase database = client.getDatabase(configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(configuration.getCollectionId()); return new StaticDataLocator(collectionKey, container); } }
That's right. The actual QPS against the collection will be determined by the operation latency.
/**
 * Drives the benchmark: repeatedly submits operations for randomly ordered keys until
 * the configured budget is exhausted, then drains the executor and logs the totals.
 *
 * @param testData the key -> document map that was previously loaded into the container
 */
public void run(Map<Key, ObjectNode> testData) {
    // Shuffle so successive iterations do not hit keys in insertion order.
    final ArrayList<Key> keys = new ArrayList<>(testData.keySet());
    Collections.shuffle(keys);
    final long runStartTime = System.currentTimeMillis();
    final AtomicLong successCount = new AtomicLong(0);
    final AtomicLong errorCount = new AtomicLong(0);
    long i = 0;
    for (; BenchmarkHelper.shouldContinue(runStartTime, i, _configuration); i++) {
        // Wrap around the key list; keys.size() is an int, so the modulo always fits an int.
        final int index = (int) (i % keys.size());
        final Key key = keys.get(index);
        _executorService.submit(() -> runOperation(key, testData, successCount, errorCount));
    }
    // Fix: shutdown() must precede awaitTermination(); without it the executor never
    // reaches the terminated state, so the old code always blocked the full 60 seconds
    // and could log totals while tasks were still running.
    _executorService.shutdown();
    try {
        if (!_executorService.awaitTermination(60, TimeUnit.SECONDS)) {
            LOGGER.warn("Submitted operations did not complete within the 60 second drain window");
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        LOGGER.error("Error awaiting the completion of all tasks", e);
    }
    LOGGER.info("Number of iterations: {}, Errors: {}, Runtime: {} millis",
        successCount.get(), errorCount.get(), System.currentTimeMillis() - runStartTime);
}
_executorService.submit(() -> runOperation(key, testData, successCount, errorCount));
/**
 * Drives the benchmark: repeatedly submits operations for randomly ordered keys until
 * the configured budget is exhausted, then drains the executor and logs the totals.
 *
 * @param testData the key -> document map that was previously loaded into the container
 */
public void run(Map<Key, ObjectNode> testData) {
    // Shuffle so successive iterations do not hit keys in insertion order.
    final ArrayList<Key> keys = new ArrayList<>(testData.keySet());
    Collections.shuffle(keys);
    final long runStartTime = System.currentTimeMillis();
    final AtomicLong successCount = new AtomicLong(0);
    final AtomicLong errorCount = new AtomicLong(0);
    long i = 0;
    for (; BenchmarkHelper.shouldContinue(runStartTime, i, _configuration); i++) {
        // Wrap around the key list; keys.size() is an int, so the modulo always fits an int.
        final int index = (int) (i % keys.size());
        final Key key = keys.get(index);
        _executorService.submit(() -> runOperation(key, testData, successCount, errorCount));
    }
    // Fix: shutdown() must precede awaitTermination(); without it the executor never
    // reaches the terminated state, so the old code always blocked the full 60 seconds
    // and could log totals while tasks were still running.
    _executorService.shutdown();
    try {
        if (!_executorService.awaitTermination(60, TimeUnit.SECONDS)) {
            LOGGER.warn("Submitted operations did not complete within the 60 second drain window");
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
        LOGGER.error("Error awaiting the completion of all tasks", e);
    }
    LOGGER.info("Number of iterations: {}, Errors: {}, Runtime: {} millis",
        successCount.get(), errorCount.get(), System.currentTimeMillis() - runStartTime);
}
class GetTestRunner { private static final Logger LOGGER = LoggerFactory.getLogger(GetTestRunner.class); private static final boolean VALIDATE_RESULTS = false; private final Configuration _configuration; private final Accessor<Key, JsonNode> _accessor; private final ExecutorService _executorService; GetTestRunner(final Configuration configuration, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for setting up the Database and containers for the test"); Preconditions.checkNotNull(metricsRegistry, "The MetricsRegistry can not be null"); _configuration = configuration; _accessor = createAccessor(configuration, client, metricsRegistry); _executorService = Executors.newFixedThreadPool(_configuration.getConcurrency()); } private void runOperation(final Key key, final Map<Key, ObjectNode> testData, final AtomicLong successCount, final AtomicLong errorCount) { try { final Result<Key, JsonNode> result = _accessor.get(key, GetRequestOptions.EMPTY_REQUEST_OPTIONS); if (VALIDATE_RESULTS && !expectedResponse(testData.get(key), result)) { LOGGER.info("Result mismatch for Key {}; Actual value: {}, Expected: {}", key, result.getResult(), testData.get(key)); errorCount.getAndIncrement(); } successCount.getAndIncrement(); } catch (AccessorException e) { errorCount.getAndIncrement(); } } private boolean expectedResponse(final ObjectNode expectedResult, final Result<Key, JsonNode> result) { final JsonNode actualValue = result.getResult() .map(Entity::get) .orElse(null); final Iterator<String> fieldNames = expectedResult.fieldNames(); while (fieldNames.hasNext()) { final String field = fieldNames.next(); final JsonNode expectedJsonNode = expectedResult.get(field); final JsonNode actualJsonNode = actualValue.get(field); if (!expectedJsonNode.equals(actualJsonNode)) { LOGGER.info("mismatch 
Actual value: {}, Expected: {}", actualJsonNode, expectedJsonNode); return false; } } return true; } private Accessor<Key, JsonNode> createAccessor(final Configuration configuration, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { final StaticDataLocator dataLocator = createDataLocator(configuration, client); final KeyExtractor<Key> keyExtractor = new KeyExtractorImpl(); final DocumentTransformer<JsonNode, JsonNode> documentTransformer = new IdentityDocumentTransformer<>(); final Clock clock = Clock.systemUTC(); return new CosmosDBDataAccessor<>(dataLocator, keyExtractor, new ResponseHandler<>(documentTransformer, keyExtractor), new MetricsFactory(metricsRegistry, clock), clock, new OperationsLogger(Duration.ofSeconds(10))); } private StaticDataLocator createDataLocator(Configuration configuration, CosmosAsyncClient client) { final CollectionKey collectionKey = new CollectionKey(configuration.getServiceEndpoint(), configuration.getDatabaseId(), configuration.getCollectionId()); final CosmosAsyncDatabase database = client.getDatabase(configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(configuration.getCollectionId()); return new StaticDataLocator(collectionKey, container); } }
class GetTestRunner { private static final Logger LOGGER = LoggerFactory.getLogger(GetTestRunner.class); private static final boolean VALIDATE_RESULTS = false; private final Configuration _configuration; private final Accessor<Key, JsonNode> _accessor; private final ExecutorService _executorService; GetTestRunner(final Configuration configuration, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for setting up the Database and containers for the test"); Preconditions.checkNotNull(metricsRegistry, "The MetricsRegistry can not be null"); _configuration = configuration; _accessor = createAccessor(configuration, client, metricsRegistry); _executorService = Executors.newFixedThreadPool(_configuration.getConcurrency()); } private void runOperation(final Key key, final Map<Key, ObjectNode> testData, final AtomicLong successCount, final AtomicLong errorCount) { try { final Result<Key, JsonNode> result = _accessor.get(key, GetRequestOptions.EMPTY_REQUEST_OPTIONS); if (VALIDATE_RESULTS && !expectedResponse(testData.get(key), result)) { LOGGER.info("Result mismatch for Key {}; Actual value: {}, Expected: {}", key, result.getResult(), testData.get(key)); errorCount.getAndIncrement(); } successCount.getAndIncrement(); } catch (AccessorException e) { errorCount.getAndIncrement(); } } private boolean expectedResponse(final ObjectNode expectedResult, final Result<Key, JsonNode> result) { final JsonNode actualValue = result.getResult() .map(Entity::get) .orElse(null); final Iterator<String> fieldNames = expectedResult.fieldNames(); while (fieldNames.hasNext()) { final String field = fieldNames.next(); final JsonNode expectedJsonNode = expectedResult.get(field); final JsonNode actualJsonNode = actualValue.get(field); if (!expectedJsonNode.equals(actualJsonNode)) { LOGGER.info("mismatch 
Actual value: {}, Expected: {}", actualJsonNode, expectedJsonNode); return false; } } return true; } private Accessor<Key, JsonNode> createAccessor(final Configuration configuration, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { final StaticDataLocator dataLocator = createDataLocator(configuration, client); final KeyExtractor<Key> keyExtractor = new KeyExtractorImpl(); final DocumentTransformer<JsonNode, JsonNode> documentTransformer = new IdentityDocumentTransformer<>(); final Clock clock = Clock.systemUTC(); return new CosmosDBDataAccessor<>(dataLocator, keyExtractor, new ResponseHandler<>(documentTransformer, keyExtractor), new MetricsFactory(metricsRegistry, clock), clock, new OperationsLogger(Duration.ofSeconds(10))); } private StaticDataLocator createDataLocator(Configuration configuration, CosmosAsyncClient client) { final CollectionKey collectionKey = new CollectionKey(configuration.getServiceEndpoint(), configuration.getDatabaseId(), configuration.getCollectionId()); final CosmosAsyncDatabase database = client.getDatabase(configuration.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(configuration.getCollectionId()); return new StaticDataLocator(collectionKey, container); } }
I think @saragluna means change `aad_clientId` to `aad_client_id`.
public ModelAndView index() { ModelAndView model = new ModelAndView("index"); model.addObject("aad_clientId", aadAuthenticationProperties.getClientId()); model.addObject("aad_tenantId", aadAuthenticationProperties.getTenantId()); model.addObject("aad_redirectUri", Optional .ofNullable(aadAuthenticationProperties.getRedirectUriTemplate()) .orElse("http: return model; }
model.addObject("aad_redirectUri", Optional
public ModelAndView index() { ModelAndView model = new ModelAndView("index"); model.addObject("aad_clientId", aadAuthenticationProperties.getClientId()); model.addObject("aad_tenantId", aadAuthenticationProperties.getTenantId()); model.addObject("aad_redirectUri", Optional .ofNullable(aadAuthenticationProperties.getRedirectUriTemplate()) .orElse("http: return model; }
class TodoListController { @Autowired private AADAuthenticationProperties aadAuthenticationProperties; private final List<TodoItem> todoList = new ArrayList<>(); public TodoListController() { todoList.add(0, new TodoItem(2398, "anything", "whoever")); } @RequestMapping("/home") public Map<String, Object> home() { final Map<String, Object> model = new HashMap<>(); model.put("id", UUID.randomUUID().toString()); model.put("content", "home"); return model; } @RequestMapping({"/"}) /** * HTTP GET */ @RequestMapping(value = "/api/todolist/{index}", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<?> getTodoItem(@PathVariable("index") int index) { if (index > todoList.size() - 1) { return new ResponseEntity<>(new TodoItem(-1, "index out of range", null), HttpStatus.NOT_FOUND); } return new ResponseEntity<>(todoList.get(index), HttpStatus.OK); } /** * HTTP GET ALL */ @RequestMapping(value = "/api/todolist", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<List<TodoItem>> getAllTodoItems() { return new ResponseEntity<>(todoList, HttpStatus.OK); } @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> addNewTodoItem(@RequestBody TodoItem item) { item.setID(todoList.size() + 1); todoList.add(todoList.size(), item); return new ResponseEntity<>("Entity created", HttpStatus.CREATED); } /** * HTTP PUT */ @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.PUT, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> updateTodoItem(@RequestBody TodoItem item) { final List<TodoItem> find = todoList.stream().filter(i -> i.getID() == item.getID()).collect(Collectors.toList()); if (!find.isEmpty()) { todoList.set(todoList.indexOf(find.get(0)), item); return new ResponseEntity<>("Entity is 
updated", HttpStatus.OK); } return new ResponseEntity<>("Entity not found", HttpStatus.OK); } /** * HTTP DELETE */ @RequestMapping(value = "/api/todolist/{id}", method = RequestMethod.DELETE) public ResponseEntity<String> deleteTodoItem(@PathVariable("id") int id, PreAuthenticatedAuthenticationToken authToken) { final UserPrincipal current = (UserPrincipal) authToken.getPrincipal(); Membership membership = new Membership( "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", Membership.OBJECT_TYPE_GROUP, "group1"); if (current.isMemberOf(aadAuthenticationProperties, membership.getDisplayName())) { return todoList.stream() .filter(i -> i.getID() == id) .findFirst() .map(item -> { todoList.remove(item); return new ResponseEntity<>("OK", HttpStatus.OK); }) .orElseGet(() -> new ResponseEntity<>("Entity not found", HttpStatus.OK)); } else { return new ResponseEntity<>("Access is denied", HttpStatus.OK); } } }
class TodoListController { @Autowired private AADAuthenticationProperties aadAuthenticationProperties; private final List<TodoItem> todoList = new ArrayList<>(); public TodoListController() { todoList.add(0, new TodoItem(2398, "anything", "whoever")); } @RequestMapping("/home") public Map<String, Object> home() { final Map<String, Object> model = new HashMap<>(); model.put("id", UUID.randomUUID().toString()); model.put("content", "home"); return model; } @RequestMapping({"/"}) /** * HTTP GET */ @RequestMapping(value = "/api/todolist/{index}", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<?> getTodoItem(@PathVariable("index") int index) { if (index > todoList.size() - 1) { return new ResponseEntity<>(new TodoItem(-1, "index out of range", null), HttpStatus.NOT_FOUND); } return new ResponseEntity<>(todoList.get(index), HttpStatus.OK); } /** * HTTP GET ALL */ @RequestMapping(value = "/api/todolist", method = RequestMethod.GET, produces = {MediaType.APPLICATION_JSON_VALUE}) public ResponseEntity<List<TodoItem>> getAllTodoItems() { return new ResponseEntity<>(todoList, HttpStatus.OK); } @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> addNewTodoItem(@RequestBody TodoItem item) { item.setID(todoList.size() + 1); todoList.add(todoList.size(), item); return new ResponseEntity<>("Entity created", HttpStatus.CREATED); } /** * HTTP PUT */ @PreAuthorize("hasRole('ROLE_group1')") @RequestMapping(value = "/api/todolist", method = RequestMethod.PUT, consumes = MediaType.APPLICATION_JSON_VALUE) public ResponseEntity<String> updateTodoItem(@RequestBody TodoItem item) { final List<TodoItem> find = todoList.stream().filter(i -> i.getID() == item.getID()).collect(Collectors.toList()); if (!find.isEmpty()) { todoList.set(todoList.indexOf(find.get(0)), item); return new ResponseEntity<>("Entity is 
updated", HttpStatus.OK); } return new ResponseEntity<>("Entity not found", HttpStatus.OK); } /** * HTTP DELETE */ @RequestMapping(value = "/api/todolist/{id}", method = RequestMethod.DELETE) public ResponseEntity<String> deleteTodoItem(@PathVariable("id") int id, PreAuthenticatedAuthenticationToken authToken) { final UserPrincipal current = (UserPrincipal) authToken.getPrincipal(); Membership membership = new Membership( "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", Membership.OBJECT_TYPE_GROUP, "group1"); if (current.isMemberOf(aadAuthenticationProperties, membership.getDisplayName())) { return todoList.stream() .filter(i -> i.getID() == id) .findFirst() .map(item -> { todoList.remove(item); return new ResponseEntity<>("OK", HttpStatus.OK); }) .orElseGet(() -> new ResponseEntity<>("Entity not found", HttpStatus.OK)); } else { return new ResponseEntity<>("Access is denied", HttpStatus.OK); } } }
Any reason a `LinkedList` was chosen over an `ArrayList`? Are we aiming to reduce the number of possible resizes that an `ArrayList` would perform internally? Do we want less memory thrashing or potentially better memory locality here?
public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); }
this.values = new LinkedList<>();
public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); }
class Header { private final String name; private List<String> values; private String cachedStringValue; private String[] cachedStringValues; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.addAll(Arrays.asList(values)); } public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = values; } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { checkCachedStringValue(); return cachedStringValues; } public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; this.cachedStringValues = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValues == null) { cachedStringValues = values.toArray(new String[] { }); } if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
class Header { private final String name; private final List<String> values; private String cachedStringValue; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); for (String value : values) { this.values.add(value); } } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(values); } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. Changes made to this array will not be reflected in the headers. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { return values.toArray(new String[] { }); } /** * Returns all values associated with this header, represented as an unmodifiable list of strings. * * @return An unmodifiable list containing all values associated with this header. 
*/ public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
It would be better to iterate the values ourselves as `Arrays.asList` will box the values into an internal type then unbox it back to a newly cloned array
public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.addAll(Arrays.asList(values)); }
this.values.addAll(Arrays.asList(values));
public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); for (String value : values) { this.values.add(value); } }
class Header { private final String name; private List<String> values; private String cachedStringValue; private String[] cachedStringValues; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = values; } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { checkCachedStringValue(); return cachedStringValues; } public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; this.cachedStringValues = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValues == null) { cachedStringValues = values.toArray(new String[] { }); } if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
class Header { private final String name; private final List<String> values; private String cachedStringValue; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(values); } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. Changes made to this array will not be reflected in the headers. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { return values.toArray(new String[] { }); } /** * Returns all values associated with this header, represented as an unmodifiable list of strings. * * @return An unmodifiable list containing all values associated with this header. 
*/ public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
Should we prevent `null` values here as this would cause downstream issues
public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = values; }
Objects.requireNonNull(name, "'name' cannot be null.");
public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(values); }
class Header { private final String name; private List<String> values; private String cachedStringValue; private String[] cachedStringValues; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.addAll(Arrays.asList(values)); } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { checkCachedStringValue(); return cachedStringValues; } public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; this.cachedStringValues = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. 
*/ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValues == null) { cachedStringValues = values.toArray(new String[] { }); } if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
class Header { private final String name; private final List<String> values; private String cachedStringValue; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); for (String value : values) { this.values.add(value); } } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. Changes made to this array will not be reflected in the headers. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { return values.toArray(new String[] { }); } /** * Returns all values associated with this header, represented as an unmodifiable list of strings. * * @return An unmodifiable list containing all values associated with this header. 
*/ public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
Funnily enough this is what I initially had, but IntelliJ warned to change it to above so I let it. Reverted it now.
public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.addAll(Arrays.asList(values)); }
this.values.addAll(Arrays.asList(values));
public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); for (String value : values) { this.values.add(value); } }
class Header { private final String name; private List<String> values; private String cachedStringValue; private String[] cachedStringValues; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = values; } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { checkCachedStringValue(); return cachedStringValues; } public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; this.cachedStringValues = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValues == null) { cachedStringValues = values.toArray(new String[] { }); } if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
class Header { private final String name; private final List<String> values; private String cachedStringValue; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(values); } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. Changes made to this array will not be reflected in the headers. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { return values.toArray(new String[] { }); } /** * Returns all values associated with this header, represented as an unmodifiable list of strings. * * @return An unmodifiable list containing all values associated with this header. 
*/ public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
I'm ok with stripping out null values if you think this is to the spec?
public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = values; }
Objects.requireNonNull(name, "'name' cannot be null.");
public Header(String name, List<String> values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(values); }
class Header { private final String name; private List<String> values; private String cachedStringValue; private String[] cachedStringValues; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.addAll(Arrays.asList(values)); } /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { checkCachedStringValue(); return cachedStringValues; } public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; this.cachedStringValues = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. 
*/ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValues == null) { cachedStringValues = values.toArray(new String[] { }); } if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
class Header { private final String name; private final List<String> values; private String cachedStringValue; /** * Create a Header instance using the provided name and value. * * @param name the name of the header. * @param value the value of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String value) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); this.values.add(value); } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ public Header(String name, String... values) { Objects.requireNonNull(name, "'name' cannot be null."); this.name = name; this.values = new LinkedList<>(); for (String value : values) { this.values.add(value); } } /** * Create a Header instance using the provided name and values. * * @param name the name of the header. * @param values the values of the header. * @throws NullPointerException if {@code name} is null. */ /** * Gets the header name. * * @return the name of this {@link Header} */ public String getName() { return name; } /** * Gets the combined, comma-separated value of this {@link Header}, taking into account all values provided. * * @return the value of this Header */ public String getValue() { checkCachedStringValue(); return cachedStringValue; } /** * Gets the comma separated value as an array. Changes made to this array will not be reflected in the headers. * * @return the values of this {@link Header} that are separated by a comma */ public String[] getValues() { return values.toArray(new String[] { }); } /** * Returns all values associated with this header, represented as an unmodifiable list of strings. * * @return An unmodifiable list containing all values associated with this header. 
*/ public List<String> getValuesList() { return Collections.unmodifiableList(values); } /** * Add a new value to the end of the Header. * * @param value the value to add */ public void addValue(String value) { this.values.add(value); this.cachedStringValue = null; } /** * Gets the String representation of the header. * * @return the String representation of this Header. */ @Override public String toString() { checkCachedStringValue(); return name + ":" + cachedStringValue; } private void checkCachedStringValue() { if (cachedStringValue == null) { cachedStringValue = String.join(",", values); } } }
Done
/**
 * Validates that a plain query and an ORDER BY query can be resumed from continuation
 * tokens captured before a partition split and still return the complete, correct results.
 *
 * <p>Flow: create a container, insert documents, read the first page of each query to capture
 * continuation tokens, scale throughput to force a partition split, wait for the split to
 * complete, then drain the remaining pages from the saved tokens and verify the results.</p>
 */
public void splitQueryContinuationToken() {
    String containerId = "splittestcontainer_" + UUID.randomUUID();
    int itemCount = 20;
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    createdDatabase.createContainer(containerProperties).block();
    CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);
    List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container);
    // Expected document ids in ascending c.prop order, used to validate the ORDER BY results.
    List<String> sortedObjects = testObjects.stream()
        .sorted(Comparator.comparing(TestObject::getProp))
        .map(TestObject::getId)
        .collect(Collectors.toList());

    String query = "Select * from c";
    String orderByQuery = "select * from c order by c.prop";
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient);
    String requestContinuation;
    String orderByRequestContinuation;
    int preferredPageSize = 15;
    ArrayList<TestObject> resultList = new ArrayList<>();
    ArrayList<TestObject> orderByResultList = new ArrayList<>();

    // Capture the first page (and continuation token) of each query BEFORE the split.
    FeedResponse<TestObject> jsonNodeFeedResponse = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert jsonNodeFeedResponse != null;
    resultList.addAll(jsonNodeFeedResponse.getResults());
    requestContinuation = jsonNodeFeedResponse.getContinuationToken();

    FeedResponse<TestObject> orderByFeedResponse = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert orderByFeedResponse != null;
    orderByResultList.addAll(orderByFeedResponse.getResults());
    orderByRequestContinuation = orderByFeedResponse.getContinuationToken();

    logger.info("Scaling up throughput for split");
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());

    // Poll until the service finishes the throughput replace (which triggers the split).
    throughputResponse = container.readThroughput().block();
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        try {
            Thread.sleep(10 * 1000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag and fail fast instead of swallowing the interruption
            // (the original only called e.printStackTrace() and kept looping).
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Interrupted while waiting for partition split to complete", e);
        }
        throughputResponse = container.readThroughput().block();
    }

    logger.info("Resuming query from the continuation");
    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient);
    // as(...) must precede the assertion for the description to be applied on failure.
    assertThat(partitionKeyRangesAfterSplit.size())
        .as("Partition ranges should increase after split")
        .isGreaterThan(partitionKeyRanges.size());
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());

    container.readItem(testObjects.get(0).getId(),
        new PartitionKey(testObjects.get(0).getMypk()),
        JsonNode.class).block();

    // Drain the remaining pages of both queries from the pre-split continuation tokens.
    Flux<FeedResponse<TestObject>> feedResponseFlux = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(requestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) {
        resultList.addAll(nodeFeedResponse.getResults());
    }
    Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(orderByRequestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) {
        orderByResultList.addAll(nodeFeedResponse.getResults());
    }

    List<String> sourceIds = testObjects.stream().map(TestObject::getId).collect(Collectors.toList());
    List<String> resultIds = resultList.stream().map(TestObject::getId).collect(Collectors.toList());
    List<String> orderResultIds = orderByResultList.stream().map(TestObject::getId).collect(Collectors.toList());

    assertThat(resultIds)
        .as("Resuming query from continuation token after split validated")
        .containsExactlyInAnyOrderElementsOf(sourceIds);
    assertThat(orderResultIds)
        .as("Resuming orderby query from continuation token after split validated")
        .containsExactlyElementsOf(sortedObjects);

    container.delete().block();
}
} catch (InterruptedException e) {
/**
 * Validates that a plain query and an ORDER BY query can be resumed from continuation
 * tokens captured before a partition split and still return the complete, correct results.
 *
 * <p>Flow: create a container, insert documents, read the first page of each query to capture
 * continuation tokens, scale throughput to force a partition split, wait for the split to
 * complete, then drain the remaining pages from the saved tokens and verify the results.</p>
 *
 * @throws Exception if interrupted while waiting for the split to complete.
 */
public void splitQueryContinuationToken() throws Exception {
    String containerId = "splittestcontainer_" + UUID.randomUUID();
    int itemCount = 20;
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    createdDatabase.createContainer(containerProperties).block();
    CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);
    List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container);
    // Expected document ids in ascending c.prop order, used to validate the ORDER BY results.
    List<String> sortedObjects = testObjects.stream()
        .sorted(Comparator.comparing(TestObject::getProp))
        .map(TestObject::getId)
        .collect(Collectors.toList());

    String query = "Select * from c";
    String orderByQuery = "select * from c order by c.prop";
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient);
    String requestContinuation;
    String orderByRequestContinuation;
    int preferredPageSize = 15;
    ArrayList<TestObject> resultList = new ArrayList<>();
    ArrayList<TestObject> orderByResultList = new ArrayList<>();

    // Capture the first page (and continuation token) of each query BEFORE the split.
    FeedResponse<TestObject> jsonNodeFeedResponse = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert jsonNodeFeedResponse != null;
    resultList.addAll(jsonNodeFeedResponse.getResults());
    requestContinuation = jsonNodeFeedResponse.getContinuationToken();

    FeedResponse<TestObject> orderByFeedResponse = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert orderByFeedResponse != null;
    orderByResultList.addAll(orderByFeedResponse.getResults());
    orderByRequestContinuation = orderByFeedResponse.getContinuationToken();

    logger.info("Scaling up throughput for split");
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());

    // Poll until the service finishes the throughput replace (which triggers the split).
    // InterruptedException from Thread.sleep propagates via 'throws Exception'.
    throughputResponse = container.readThroughput().block();
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        Thread.sleep(10 * 1000);
        throughputResponse = container.readThroughput().block();
    }

    logger.info("Resuming query from the continuation");
    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient);
    // as(...) must precede the assertion for the description to be applied on failure.
    assertThat(partitionKeyRangesAfterSplit.size())
        .as("Partition ranges should increase after split")
        .isGreaterThan(partitionKeyRanges.size());
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());

    container.readItem(testObjects.get(0).getId(),
        new PartitionKey(testObjects.get(0).getMypk()),
        JsonNode.class).block();

    // Drain the remaining pages of both queries from the pre-split continuation tokens.
    Flux<FeedResponse<TestObject>> feedResponseFlux = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(requestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) {
        resultList.addAll(nodeFeedResponse.getResults());
    }
    Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(orderByRequestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) {
        orderByResultList.addAll(nodeFeedResponse.getResults());
    }

    List<String> sourceIds = testObjects.stream().map(TestObject::getId).collect(Collectors.toList());
    List<String> resultIds = resultList.stream().map(TestObject::getId).collect(Collectors.toList());
    List<String> orderResultIds = orderByResultList.stream().map(TestObject::getId).collect(Collectors.toList());

    assertThat(resultIds)
        .as("Resuming query from continuation token after split validated")
        .containsExactlyInAnyOrderElementsOf(sourceIds);
    assertThat(orderResultIds)
        .as("Resuming orderby query from continuation token after split validated")
        .containsExactlyElementsOf(sortedObjects);

    container.delete().block();
}
/**
 * Integration test suite validating Cosmos DB query behavior: ORDER BY paging,
 * null-options handling across resource query APIs, and query-plan caching.
 *
 * <p>Runs against shared fixtures provided by {@code TestSuiteBase}; documents are
 * bulk-inserted in {@code beforeClass} and reused across tests.</p>
 */
class QueryValidationTests extends TestSuiteBase {
    private static final int DEFAULT_NUM_DOCUMENTS = 1000;
    private static final int DEFAULT_PAGE_SIZE = 100;
    private CosmosAsyncDatabase createdDatabase;
    private CosmosAsyncContainer createdContainer;
    // Used for random partition-key selection and random document property values.
    private Random random;
    private CosmosAsyncClient client;
    // Populated once in beforeClass; tests treat this as the expected document set.
    private List<TestObject> createdDocuments = new ArrayList<>();

    @Factory(dataProvider = "clientBuildersWithDirectSession")
    public QueryValidationTests(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        random = new Random();
    }

    /**
     * Builds the client, resolves the shared database/container, clears leftover data,
     * and seeds {@link #createdDocuments} with {@code DEFAULT_NUM_DOCUMENTS} documents.
     */
    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() throws Exception {
        // Enable query-plan caching for the whole suite; queryPlanCacheEnabledFlag toggles it.
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        client = this.getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdContainer = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdContainer);
        createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
    }

    /**
     * Verifies that the COSMOS.QUERYPLAN_CACHING_ENABLED system property toggles
     * {@code Configs.isQueryPlanCachingEnabled()} in both directions.
     */
    @Test(groups = {"unit"}, priority = 1)
    public void queryPlanCacheEnabledFlag() {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        // NOTE(review): this builder instance is never used — confirm whether it is needed
        // to trigger config evaluation or is simply dead code.
        CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder();
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        assertThat(Configs.isQueryPlanCachingEnabled()).isTrue();
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQuery() {
        /*
         * Query documents in pages, then query all documents in one page
         * (page size == document count), and compare the two result sets.
         */
        String query = "select * from c order by c.prop ASC";
        queryWithOrderByAndAssert(
            DEFAULT_PAGE_SIZE,
            DEFAULT_NUM_DOCUMENTS,
            query,
            createdContainer,
            d -> d.getProp(),
            createdDocuments);
    }

    /**
     * Runs an ORDER BY query spanning two partitions of a dedicated high-throughput
     * container and validates paged vs. single-page results.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQueryForLargeCollection() {
        CosmosContainerProperties containerProperties = getCollectionDefinition();
        // High manual throughput so the container is created with multiple partitions.
        createdDatabase.createContainer(
            containerProperties,
            ThroughputProperties.createManualThroughput(100000),
            new CosmosContainerRequestOptions()
        ).block();
        CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId());
        int partitionDocCount = 5;
        int pageSize = partitionDocCount + 1;
        String partition1Key = UUID.randomUUID().toString();
        String partition2Key = UUID.randomUUID().toString();
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition1Key),
            container));
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition2Key),
            container));
        String query = String.format(
            "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC",
            partition1Key,
            partition2Key);
        queryWithOrderByAndAssert(
            pageSize,
            partitionDocCount * 2,
            query,
            container,
            d -> d.getConstantProp(),
            documentsInserted);
    }

    /**
     * Verifies every query overload (databases, containers, users, permissions, items,
     * sprocs, triggers, UDFs, conflicts) accepts a null options argument without failing.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryOptionNullValidation() {
        String query = "Select top 1 * from c";
        FeedResponse<CosmosDatabaseProperties> databases =
            client.queryDatabases(query, null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);
        databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);
        FeedResponse<CosmosContainerProperties> containers =
            createdDatabase.readAllContainers(null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1);
        containers = createdDatabase.queryContainers(query, null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);
        containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);
        CosmosUserProperties userProperties = new CosmosUserProperties();
        userProperties.setId(UUID.randomUUID().toString());
        createdDatabase.createUser(userProperties).block();
        FeedResponse<CosmosUserProperties> users =
            createdDatabase.queryUsers(query, null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);
        users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);
        CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties();
        cosmosPermissionProperties.setContainerName(createdContainer.getId());
        cosmosPermissionProperties.setPermissionMode(PermissionMode.READ);
        cosmosPermissionProperties.setId(UUID.randomUUID().toString());
        createdDatabase.getUser(userProperties.getId()).createPermission(cosmosPermissionProperties, null).block();
        FeedResponse<CosmosPermissionProperties> permissions =
            createdDatabase.getUser(userProperties.getId()).queryPermissions(query, null).byPage(1).blockFirst();
        assertThat(permissions.getResults().size()).isEqualTo(1);
        FeedResponse<TestObject> items =
            createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);
        items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);
        // Create one of each script resource so the script query APIs have results to return.
        createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block();
        createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block();
        createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block();
        FeedResponse<CosmosStoredProcedureProperties> sprocs =
            createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        sprocs = createdContainer.getScripts().queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        FeedResponse<CosmosTriggerProperties> triggers =
            createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        triggers = createdContainer.getScripts().queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        FeedResponse<CosmosUserDefinedFunctionProperties> udfs =
            createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);
        udfs = createdContainer.getScripts().queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);
        // Conflict queries with TOP are rejected by the service; only BADREQUEST is acceptable.
        try {
            createdContainer.queryConflicts(query, null).byPage(1).blockFirst();
        } catch (CosmosException exception) {
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        }
        createdContainer.readAllConflicts(null).byPage(1).blockFirst();
    }

    // TestNG data provider: one plain query and one ORDER BY query.
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c "},
            new Object[] { "select * from c order by c.prop ASC"},
        };
    }

    /**
     * Runs the same single-partition query twice and asserts the query plan is cached
     * after the first execution and that both executions return identical results.
     */
    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionCorrectness(String query) {
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer));
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));
        List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        // First execution should have populated the query-plan cache for this query text.
        assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue();
        List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).containsExactlyElementsOf(ids2);
    }

    /**
     * Verifies query-plan caching with parameterized queries: parameter value changes
     * reuse the cached plan, while TOP queries are not cached.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() {
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec();
        sqlQuerySpec.setQueryText("select * from c where c.id = @id");
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        List<TestObject> pk2Docs = this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer);
        documentsInserted.addAll(pk2Docs);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId())));
        // Cache keyed by query text; must be empty before the first execution.
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        // Same query text, different parameter value: the cached plan should be reused.
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue();
        List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        // Different @id values target different documents, so the result sets must not overlap.
        assertThat(ids1).doesNotContainAnyElementsOf(ids2);
        // TOP queries should never be cached.
        sqlQuerySpec.setQueryText("select top @top * from c");
        // NOTE(review): topValue is declared but the literal 2 is passed below — confirm
        // whether the variable should be used or removed.
        int topValue = 2;
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", 2)));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
    }

    // NOTE(review): this @Test annotation sits on a private helper, which TestNG does not
    // run — it appears to be a leftover from a removed/extracted test method; confirm.
    @Test(groups = {"simple"}, timeOut = TIMEOUT * 10)
    @NotNull
    // Reads all partition key ranges of the given container via the internal document client.
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    // Executes the query against the shared container and blocks (up to TIMEOUT ms)
    // for all results via a TestSubscriber.
    private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) {
        CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type);
        TestSubscriber<T> testSubscriber = new TestSubscriber<>();
        queryPagedFlux.subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        return testSubscriber.values();
    }

    // Drains the query page by page, chaining continuation tokens until exhausted.
    private <T> List<T> queryWithContinuationTokens(String query, int pageSize,
                                                    CosmosAsyncContainer container, Class<T> klass) {
        logger.info("querying: " + query);
        String requestContinuation = null;
        List<T> receivedDocuments = new ArrayList<>();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        do {
            CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass);
            FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst();
            assert firstPage != null;
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    // Builds a TestObject with a random prop/name and the supplied id and partition key.
    private TestObject getDocumentDefinition(String documentId, String partitionKey) {
        int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
        TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
        return doc;
    }

    // Sorts the documents by the extracted property using the given comparator and
    // returns their ids in that order.
    private <T> List<String> sortTestObjectsAndCollectIds(
        List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> d.getId()).collect(Collectors.toList());
    }

    // Bulk-inserts documentCount documents; partition keys are chosen randomly from
    // partitionKeys, or generated randomly when partitionKeys is null.
    private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
        List<TestObject> documentsToInsert = new ArrayList<>();
        for (int i = 0; i < documentCount; i++) {
            documentsToInsert.add(
                getDocumentDefinition(
                    UUID.randomUUID().toString(),
                    partitionKeys == null
                        ? UUID.randomUUID().toString()
                        : partitionKeys.get(random.nextInt(partitionKeys.size()))));
        }
        List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
        waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
        return documentInserted;
    }

    // Runs the query paged and unpaged, then asserts: unpaged ids match the expected set,
    // and paged order equals unpaged order.
    private <T extends Comparable<T>> void queryWithOrderByAndAssert(
        int pageSize,
        int documentCount,
        String query,
        CosmosAsyncContainer container,
        Function<TestObject, T> extractProp,
        List<TestObject> documentsInserted) {
        List<TestObject> documentsPaged = queryWithContinuationTokens(query, pageSize, container, TestObject.class);
        List<TestObject> allDocuments = queryWithContinuationTokens(query, documentCount, container, TestObject.class);
        Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder());
        List<String> expectedResourceIds = sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator);
        List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList());
        List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds);
        assertThat(docIds1).containsExactlyElementsOf(docIds2);
    }

    // Fixture factory: a trivial UDF definition with a random id.
    private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() {
        CosmosUserDefinedFunctionProperties udf =
            new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return udf;
    }

    // Fixture factory: a trivial pre-create trigger definition with a random id.
    private static CosmosTriggerProperties getCosmosTriggerProperties() {
        CosmosTriggerProperties trigger = new CosmosTriggerProperties(UUID.randomUUID().toString(), "function() {var " +
            "x = 10;}");
        trigger.setTriggerOperation(TriggerOperation.CREATE);
        trigger.setTriggerType(TriggerType.PRE);
        return trigger;
    }

    // Fixture factory: a trivial stored-procedure definition with a random id.
    private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() {
        CosmosStoredProcedureProperties storedProcedureDef =
            new CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return storedProcedureDef;
    }

    /**
     * Simple POJO used as the document payload: id, name, a random int prop,
     * a partition key (mypk), and a constant property used for DESC ordering tests.
     */
    static class TestObject {
        String id;
        String name;
        int prop;
        String mypk;
        // Same value on every document; lets ORDER BY tests exercise ties.
        String constantProp = "constantProp";

        public TestObject() {
        }

        public TestObject(String id, String name, int prop, String mypk) {
            this.id = id;
            this.name = name;
            this.prop = prop;
            this.mypk = mypk;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getProp() {
            return prop;
        }

        public void setProp(final int prop) {
            this.prop = prop;
        }

        public String getMypk() {
            return mypk;
        }

        public void setMypk(String mypk) {
            this.mypk = mypk;
        }

        public String getConstantProp() {
            return constantProp;
        }
    }
}
class QueryValidationTests extends TestSuiteBase { private static final int DEFAULT_NUM_DOCUMENTS = 1000; private static final int DEFAULT_PAGE_SIZE = 100; private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdContainer; private Random random; private CosmosAsyncClient client; private List<TestObject> createdDocuments = new ArrayList<>(); @Factory(dataProvider = "clientBuildersWithDirectSession") public QueryValidationTests(CosmosClientBuilder clientBuilder) { super(clientBuilder); random = new Random(); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true"); client = this.getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdContainer = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdContainer); createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer)); } @Test(groups = {"unit"}, priority = 1) public void queryPlanCacheEnabledFlag() { System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false"); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder(); assertThat(Configs.isQueryPlanCachingEnabled()).isFalse(); System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true"); assertThat(Configs.isQueryPlanCachingEnabled()).isTrue(); System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false"); assertThat(Configs.isQueryPlanCachingEnabled()).isFalse(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void orderByQuery() { /* The idea here is to query documents in pages, query all the documents(with pagesize as num_documents and compare the results. 
*/ String query = "select * from c order by c.prop ASC"; queryWithOrderByAndAssert( DEFAULT_PAGE_SIZE, DEFAULT_NUM_DOCUMENTS, query, createdContainer, d -> d.getProp(), createdDocuments); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void orderByQueryForLargeCollection() { CosmosContainerProperties containerProperties = getCollectionDefinition(); createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(100000), new CosmosContainerRequestOptions() ).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId()); int partitionDocCount = 5; int pageSize = partitionDocCount + 1; String partition1Key = UUID.randomUUID().toString(); String partition2Key = UUID.randomUUID().toString(); List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(partition1Key), container)); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(partition2Key), container)); String query = String.format( "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC", partition1Key, partition2Key); queryWithOrderByAndAssert( pageSize, partitionDocCount * 2, query, container, d -> d.getConstantProp(), documentsInserted); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryOptionNullValidation() { String query = "Select top 1 * from c"; FeedResponse<CosmosDatabaseProperties> databases = client.queryDatabases(query, null).byPage(1).blockFirst(); assertThat(databases.getResults().size()).isEqualTo(1); databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(databases.getResults().size()).isEqualTo(1); FeedResponse<CosmosContainerProperties> containers = createdDatabase.readAllContainers(null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1); containers = 
createdDatabase.queryContainers(query, null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isEqualTo(1); containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isEqualTo(1); CosmosUserProperties userProperties = new CosmosUserProperties(); userProperties.setId(UUID.randomUUID().toString()); createdDatabase.createUser(userProperties).block(); FeedResponse<CosmosUserProperties> users = createdDatabase.queryUsers(query, null).byPage(1).blockFirst(); assertThat(users.getResults().size()).isEqualTo(1); users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(users.getResults().size()).isEqualTo(1); CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties(); cosmosPermissionProperties.setContainerName(createdContainer.getId()); cosmosPermissionProperties.setPermissionMode(PermissionMode.READ); cosmosPermissionProperties.setId(UUID.randomUUID().toString()); createdDatabase.getUser(userProperties.getId()).createPermission(cosmosPermissionProperties, null).block(); FeedResponse<CosmosPermissionProperties> permissions = createdDatabase.getUser(userProperties.getId()).queryPermissions(query, null).byPage(1).blockFirst(); assertThat(permissions.getResults().size()).isEqualTo(1); FeedResponse<TestObject> items = createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst(); assertThat(items.getResults().size()).isEqualTo(1); items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class).byPage(1).blockFirst(); assertThat(items.getResults().size()).isEqualTo(1); createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block(); createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block(); createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block(); 
FeedResponse<CosmosStoredProcedureProperties> sprocs = createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst(); assertThat(sprocs.getResults().size()).isEqualTo(1); sprocs = createdContainer.getScripts().queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(sprocs.getResults().size()).isEqualTo(1); FeedResponse<CosmosTriggerProperties> triggers = createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst(); assertThat(triggers.getResults().size()).isEqualTo(1); triggers = createdContainer.getScripts().queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(triggers.getResults().size()).isEqualTo(1); FeedResponse<CosmosUserDefinedFunctionProperties> udfs = createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst(); assertThat(udfs.getResults().size()).isEqualTo(1); udfs = createdContainer.getScripts().queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(udfs.getResults().size()).isEqualTo(1); try { createdContainer.queryConflicts(query, null).byPage(1).blockFirst(); } catch (CosmosException exception) { assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); } createdContainer.readAllConflicts(null).byPage(1).blockFirst(); } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c "}, new Object[] { "select * from c order by c.prop ASC"}, }; } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryPlanCacheSinglePartitionCorrectness(String query) { String pk1 = "pk1"; String pk2 = "pk2"; int partitionDocCount = 5; List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk1), createdContainer)); AsyncDocumentClient contextClient = 
CosmosBridgeInternal.getContextClient(createdContainer); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk2), createdContainer)); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey(pk2)); List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class); List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue(); List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class); List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(ids1).containsExactlyElementsOf(ids2); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() { SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(); sqlQuerySpec.setQueryText("select * from c where c.id = @id"); String pk1 = "pk1"; String pk2 = "pk2"; int partitionDocCount = 5; List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk1), createdContainer)); AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer); List<TestObject> pk2Docs = this.insertDocuments( partitionDocCount, Collections.singletonList(pk2), createdContainer); documentsInserted.addAll(pk2Docs); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey(pk2)); sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId()))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); List<String> ids1 = 
values1.stream().map(TestObject::getId).collect(Collectors.toList()); sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId()))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue(); List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(ids1).doesNotContainAnyElementsOf(ids2); sqlQuerySpec.setQueryText("select top @top * from c"); int topValue = 2; sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", 2))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); } @Test(groups = {"simple"}, timeOut = TIMEOUT * 10) @NotNull private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) { CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type); TestSubscriber<T> testSubscriber = new TestSubscriber<>(); queryPagedFlux.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); return testSubscriber.values(); } private <T> List<T> queryWithContinuationTokens(String query, int pageSize, 
CosmosAsyncContainer container, Class<T> klass) { logger.info("querying: " + query); String requestContinuation = null; List<T> receivedDocuments = new ArrayList<>(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); do { CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass); FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst(); assert firstPage != null; requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); } while (requestContinuation != null); return receivedDocuments; } private TestObject getDocumentDefinition(String documentId, String partitionKey) { int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2); TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey); return doc; } private <T> List<String> sortTestObjectsAndCollectIds( List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> d.getId()).collect(Collectors.toList()); } private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) { List<TestObject> documentsToInsert = new ArrayList<>(); for (int i = 0; i < documentCount; i++) { documentsToInsert.add( getDocumentDefinition( UUID.randomUUID().toString(), partitionKeys == null ? 
UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size())))); } List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert); waitIfNeededForReplicasToCatchUp(this.getClientBuilder()); return documentInserted; } private <T extends Comparable<T>> void queryWithOrderByAndAssert( int pageSize, int documentCount, String query, CosmosAsyncContainer container, Function<TestObject, T> extractProp, List<TestObject> documentsInserted) { List<TestObject> documentsPaged = queryWithContinuationTokens(query, pageSize, container, TestObject.class); List<TestObject> allDocuments = queryWithContinuationTokens(query, documentCount, container, TestObject.class); Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder()); List<String> expectedResourceIds = sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator); List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList()); List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds); assertThat(docIds1).containsExactlyElementsOf(docIds2); } private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() { CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}"); return udf; } private static CosmosTriggerProperties getCosmosTriggerProperties() { CosmosTriggerProperties trigger = new CosmosTriggerProperties(UUID.randomUUID().toString(), "function() {var " + "x = 10;}"); trigger.setTriggerOperation(TriggerOperation.CREATE); trigger.setTriggerType(TriggerType.PRE); return trigger; } private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() { CosmosStoredProcedureProperties storedProcedureDef = new 
CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}"); return storedProcedureDef; } static class TestObject { String id; String name; int prop; String mypk; String constantProp = "constantProp"; public TestObject() { } public TestObject(String id, String name, int prop, String mypk) { this.id = id; this.name = name; this.prop = prop; this.mypk = mypk; } public String getId() { return id; } public void setId(String id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getProp() { return prop; } public void setProp(final int prop) { this.prop = prop; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public String getConstantProp() { return constantProp; } } }
we shouldn't catch InterruptedEXception. if this is thrown it should naturally kill the test.
public void splitQueryContinuationToken() { String containerId = "splittestcontainer_" + UUID.randomUUID(); int itemCount = 20; CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk"); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client); List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container); List<String> sortedObjects = testObjects.stream() .sorted(Comparator.comparing(TestObject::getProp)) .map(TestObject::getId) .collect(Collectors.toList()); String query = "Select * from c"; String orderByQuery = "select * from c order by c.prop"; List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient); String requestContinuation = null; String orderByRequestContinuation = null; int preferredPageSize = 15; ArrayList<TestObject> resultList = new ArrayList<>(); ArrayList<TestObject> orderByResultList = new ArrayList<>(); FeedResponse<TestObject> jsonNodeFeedResponse = container .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class) .byPage(preferredPageSize).blockFirst(); assert jsonNodeFeedResponse != null; resultList.addAll(jsonNodeFeedResponse.getResults()); requestContinuation = jsonNodeFeedResponse.getContinuationToken(); FeedResponse<TestObject> orderByFeedResponse = container .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class) .byPage(preferredPageSize).blockFirst(); assert orderByFeedResponse != null; orderByResultList.addAll(orderByFeedResponse.getResults()); orderByRequestContinuation = orderByFeedResponse.getContinuationToken(); logger.info("Scaling up throughput for split"); ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000); ThroughputResponse 
throughputResponse = container.replaceThroughput(throughputProperties).block(); logger.info("Throughput replace request submitted for {} ", throughputResponse.getProperties().getManualThroughput()); throughputResponse = container.readThroughput().block(); while (true) { assert throughputResponse != null; if (!throughputResponse.isReplacePending()) { break; } logger.info("Waiting for split to complete"); try { Thread.sleep(10 * 1000); } catch (InterruptedException e) { e.printStackTrace(); } throughputResponse = container.readThroughput().block(); } logger.info("Resuming query from the continuation"); List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient); assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size()) .as("Partition ranges should increase after split"); logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size()); container.readItem(testObjects.get(0).getId(), new PartitionKey(testObjects.get(0).getMypk()), JsonNode.class).block(); Flux<FeedResponse<TestObject>> feedResponseFlux = container .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class) .byPage(requestContinuation, preferredPageSize); for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) { resultList.addAll(nodeFeedResponse.getResults()); } Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class) .byPage(orderByRequestContinuation, preferredPageSize); for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) { orderByResultList.addAll(nodeFeedResponse.getResults()); } List<String> sourceIds = testObjects.stream().map(obj -> obj.getId()).collect(Collectors.toList()); List<String> resultIds = resultList.stream().map(obj -> obj.getId()).collect(Collectors.toList()); List<String> orderResultIds = orderByResultList.stream().map(obj -> 
obj.getId()).collect(Collectors.toList()); assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds) .as("Resuming query from continuation token after split validated"); assertThat(orderResultIds).containsExactlyElementsOf(sortedObjects) .as("Resuming orderby query from continuation token after split validated"); container.delete().block(); }
} catch (InterruptedException e) {
public void splitQueryContinuationToken() throws Exception { String containerId = "splittestcontainer_" + UUID.randomUUID(); int itemCount = 20; CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk"); CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerId); AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client); List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container); List<String> sortedObjects = testObjects.stream() .sorted(Comparator.comparing(TestObject::getProp)) .map(TestObject::getId) .collect(Collectors.toList()); String query = "Select * from c"; String orderByQuery = "select * from c order by c.prop"; List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient); String requestContinuation = null; String orderByRequestContinuation = null; int preferredPageSize = 15; ArrayList<TestObject> resultList = new ArrayList<>(); ArrayList<TestObject> orderByResultList = new ArrayList<>(); FeedResponse<TestObject> jsonNodeFeedResponse = container .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class) .byPage(preferredPageSize).blockFirst(); assert jsonNodeFeedResponse != null; resultList.addAll(jsonNodeFeedResponse.getResults()); requestContinuation = jsonNodeFeedResponse.getContinuationToken(); FeedResponse<TestObject> orderByFeedResponse = container .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class) .byPage(preferredPageSize).blockFirst(); assert orderByFeedResponse != null; orderByResultList.addAll(orderByFeedResponse.getResults()); orderByRequestContinuation = orderByFeedResponse.getContinuationToken(); logger.info("Scaling up throughput for split"); ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000); 
ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block(); logger.info("Throughput replace request submitted for {} ", throughputResponse.getProperties().getManualThroughput()); throughputResponse = container.readThroughput().block(); while (true) { assert throughputResponse != null; if (!throughputResponse.isReplacePending()) { break; } logger.info("Waiting for split to complete"); Thread.sleep(10 * 1000); throughputResponse = container.readThroughput().block(); } logger.info("Resuming query from the continuation"); List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient); assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size()) .as("Partition ranges should increase after split"); logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size()); container.readItem(testObjects.get(0).getId(), new PartitionKey(testObjects.get(0).getMypk()), JsonNode.class).block(); Flux<FeedResponse<TestObject>> feedResponseFlux = container .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class) .byPage(requestContinuation, preferredPageSize); for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) { resultList.addAll(nodeFeedResponse.getResults()); } Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class) .byPage(orderByRequestContinuation, preferredPageSize); for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) { orderByResultList.addAll(nodeFeedResponse.getResults()); } List<String> sourceIds = testObjects.stream().map(obj -> obj.getId()).collect(Collectors.toList()); List<String> resultIds = resultList.stream().map(obj -> obj.getId()).collect(Collectors.toList()); List<String> orderResultIds = orderByResultList.stream().map(obj -> obj.getId()).collect(Collectors.toList()); 
assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds) .as("Resuming query from continuation token after split validated"); assertThat(orderResultIds).containsExactlyElementsOf(sortedObjects) .as("Resuming orderby query from continuation token after split validated"); container.delete().block(); }
/**
 * Integration tests validating Cosmos query behavior: ORDER BY paging with
 * continuation tokens, tolerance of {@code null} request options across every
 * resource type, and query-plan-cache correctness for single-partition and
 * parameterized queries.
 */
class QueryValidationTests extends TestSuiteBase {
    private static final int DEFAULT_NUM_DOCUMENTS = 1000;
    private static final int DEFAULT_PAGE_SIZE = 100;

    private CosmosAsyncDatabase createdDatabase;
    private CosmosAsyncContainer createdContainer;
    private Random random;
    private CosmosAsyncClient client;
    private List<TestObject> createdDocuments = new ArrayList<>();

    @Factory(dataProvider = "clientBuildersWithDirectSession")
    public QueryValidationTests(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        random = new Random();
    }

    /** Builds the async client and seeds the shared container with {@link #DEFAULT_NUM_DOCUMENTS} documents. */
    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() throws Exception {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        client = this.getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdContainer = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdContainer);
        createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
    }

    /** Verifies {@link Configs#isQueryPlanCachingEnabled()} tracks the system property at read time. */
    @Test(groups = {"unit"}, priority = 1)
    public void queryPlanCacheEnabledFlag() {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();

        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        assertThat(Configs.isQueryPlanCachingEnabled()).isTrue();

        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
    }

    /**
     * Queries documents page-by-page and in one full page, then compares both
     * result sets against the inserted documents ordered by {@code prop}.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQuery() {
        String query = "select * from c order by c.prop ASC";
        queryWithOrderByAndAssert(
            DEFAULT_PAGE_SIZE,
            DEFAULT_NUM_DOCUMENTS,
            query,
            createdContainer,
            d -> d.getProp(),
            createdDocuments);
    }

    /** Validates ORDER BY on a constant property across two partitions of a dedicated high-throughput container. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQueryForLargeCollection() {
        CosmosContainerProperties containerProperties = getCollectionDefinition();
        createdDatabase.createContainer(
            containerProperties,
            ThroughputProperties.createManualThroughput(100000),
            new CosmosContainerRequestOptions()
        ).block();
        CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId());

        int partitionDocCount = 5;
        // Page size exceeds a single partition's count so pages span partitions.
        int pageSize = partitionDocCount + 1;
        String partition1Key = UUID.randomUUID().toString();
        String partition2Key = UUID.randomUUID().toString();

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition1Key),
            container));
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition2Key),
            container));

        String query = String.format(
            "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC",
            partition1Key,
            partition2Key);
        queryWithOrderByAndAssert(
            pageSize,
            partitionDocCount * 2,
            query,
            container,
            d -> d.getConstantProp(),
            documentsInserted);
    }

    /** Every query overload must tolerate {@code null} request options. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryOptionNullValidation() {
        String query = "Select top 1 * from c";

        FeedResponse<CosmosDatabaseProperties> databases =
            client.queryDatabases(query, null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);
        databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosContainerProperties> containers =
            createdDatabase.readAllContainers(null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1);
        containers = createdDatabase.queryContainers(query, null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);
        containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);

        CosmosUserProperties userProperties = new CosmosUserProperties();
        userProperties.setId(UUID.randomUUID().toString());
        createdDatabase.createUser(userProperties).block();
        FeedResponse<CosmosUserProperties> users =
            createdDatabase.queryUsers(query, null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);
        users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);

        CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties();
        cosmosPermissionProperties.setContainerName(createdContainer.getId());
        cosmosPermissionProperties.setPermissionMode(PermissionMode.READ);
        cosmosPermissionProperties.setId(UUID.randomUUID().toString());
        createdDatabase.getUser(userProperties.getId())
            .createPermission(cosmosPermissionProperties, null).block();
        FeedResponse<CosmosPermissionProperties> permissions =
            createdDatabase.getUser(userProperties.getId())
                .queryPermissions(query, null).byPage(1).blockFirst();
        assertThat(permissions.getResults().size()).isEqualTo(1);

        FeedResponse<TestObject> items =
            createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);
        items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class)
            .byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);

        createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block();
        createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block();
        createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block();

        FeedResponse<CosmosStoredProcedureProperties> sprocs =
            createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        sprocs = createdContainer.getScripts()
            .queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosTriggerProperties> triggers =
            createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        triggers = createdContainer.getScripts()
            .queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosUserDefinedFunctionProperties> udfs =
            createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);
        udfs = createdContainer.getScripts()
            .queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);

        // NOTE(review): the conflict query is expected to be rejected with BADREQUEST
        // (presumably because TOP is unsupported there); if no exception is thrown the
        // try-block passes silently — confirm whether a fail-on-no-exception is wanted.
        try {
            createdContainer.queryConflicts(query, null).byPage(1).blockFirst();
        } catch (CosmosException exception) {
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        }
        createdContainer.readAllConflicts(null).byPage(1).blockFirst();
    }

    /** Supplies one plain and one ORDER BY query for the plan-cache correctness test. */
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c "},
            new Object[] { "select * from c order by c.prop ASC"},
        };
    }

    /** The second execution of a cached single-partition query must return the same results. */
    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionCorrectness(String query) {
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));

        // First run populates the query-plan cache ...
        List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue();

        // ... second run hits the cache and must match.
        List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).containsExactlyElementsOf(ids2);
    }

    /** Parameterized queries share a cached plan but must still honor the current parameter values. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() {
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec();
        sqlQuerySpec.setQueryText("select * from c where c.id = @id");
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        List<TestObject> pk2Docs = this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer);
        documentsInserted.addAll(pk2Docs);

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));

        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());

        // Same query text, different parameter value: cache hit, different result.
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue();
        List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).doesNotContainAnyElementsOf(ids2);

        // Queries with a parameterized TOP must bypass the plan cache entirely.
        sqlQuerySpec.setQueryText("select top @top * from c");
        int topValue = 2;
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", topValue)));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
    }

    /** Reads all partition key ranges of the given container via the low-level client. */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** Runs the query to completion (bounded by {@code TIMEOUT}) and returns all results. */
    private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) {
        CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type);
        TestSubscriber<T> testSubscriber = new TestSubscriber<>();
        queryPagedFlux.subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        return testSubscriber.values();
    }

    /** Drains the query page-by-page, resuming each page from the previous continuation token. */
    private <T> List<T> queryWithContinuationTokens(String query, int pageSize,
                                                   CosmosAsyncContainer container, Class<T> klass) {
        logger.info("querying: {}", query);
        String requestContinuation = null;
        List<T> receivedDocuments = new ArrayList<>();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        do {
            CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass);
            FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst();
            assert firstPage != null;
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /** Builds a document with a random name/prop so ORDER BY has varied values. */
    private TestObject getDocumentDefinition(String documentId, String partitionKey) {
        int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
        TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
        return doc;
    }

    /** Sorts the documents by the extracted property and returns their ids in that order. */
    private <T> List<String> sortTestObjectsAndCollectIds(
        List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> d.getId()).collect(Collectors.toList());
    }

    /**
     * Bulk-inserts {@code documentCount} documents; partition keys are drawn randomly
     * from {@code partitionKeys}, or freshly generated when the list is {@code null}.
     */
    private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys,
                                             CosmosAsyncContainer container) {
        List<TestObject> documentsToInsert = new ArrayList<>();
        for (int i = 0; i < documentCount; i++) {
            documentsToInsert.add(
                getDocumentDefinition(
                    UUID.randomUUID().toString(),
                    partitionKeys == null
                        ? UUID.randomUUID().toString()
                        : partitionKeys.get(random.nextInt(partitionKeys.size()))));
        }
        List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
        waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
        return documentInserted;
    }

    /** Runs the query paged and unpaged, asserting both match the expected sorted ids. */
    private <T extends Comparable<T>> void queryWithOrderByAndAssert(
        int pageSize, int documentCount, String query, CosmosAsyncContainer container,
        Function<TestObject, T> extractProp, List<TestObject> documentsInserted) {
        List<TestObject> documentsPaged =
            queryWithContinuationTokens(query, pageSize, container, TestObject.class);
        List<TestObject> allDocuments =
            queryWithContinuationTokens(query, documentCount, container, TestObject.class);
        Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder());
        List<String> expectedResourceIds =
            sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator);
        List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList());
        List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds);
        assertThat(docIds1).containsExactlyElementsOf(docIds2);
    }

    private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() {
        CosmosUserDefinedFunctionProperties udf =
            new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return udf;
    }

    private static CosmosTriggerProperties getCosmosTriggerProperties() {
        CosmosTriggerProperties trigger = new CosmosTriggerProperties(UUID.randomUUID().toString(),
            "function() {var " + "x = 10;}");
        trigger.setTriggerOperation(TriggerOperation.CREATE);
        trigger.setTriggerType(TriggerType.PRE);
        return trigger;
    }

    private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() {
        CosmosStoredProcedureProperties storedProcedureDef =
            new CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return storedProcedureDef;
    }

    /** Simple POJO used as the test document. {@code constantProp} is identical on every document. */
    static class TestObject {
        String id;
        String name;
        int prop;
        String mypk;
        String constantProp = "constantProp";

        public TestObject() {
        }

        public TestObject(String id, String name, int prop, String mypk) {
            this.id = id;
            this.name = name;
            this.prop = prop;
            this.mypk = mypk;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getProp() {
            return prop;
        }

        public void setProp(final int prop) {
            this.prop = prop;
        }

        public String getMypk() {
            return mypk;
        }

        public void setMypk(String mypk) {
            this.mypk = mypk;
        }

        public String getConstantProp() {
            return constantProp;
        }
    }
}
/**
 * Integration tests validating Cosmos query behavior: ORDER BY paging with
 * continuation tokens, tolerance of {@code null} request options across every
 * resource type, and query-plan-cache correctness for single-partition and
 * parameterized queries.
 */
class QueryValidationTests extends TestSuiteBase {
    private static final int DEFAULT_NUM_DOCUMENTS = 1000;
    private static final int DEFAULT_PAGE_SIZE = 100;

    private CosmosAsyncDatabase createdDatabase;
    private CosmosAsyncContainer createdContainer;
    private Random random;
    private CosmosAsyncClient client;
    private List<TestObject> createdDocuments = new ArrayList<>();

    @Factory(dataProvider = "clientBuildersWithDirectSession")
    public QueryValidationTests(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        random = new Random();
    }

    /** Builds the async client and seeds the shared container with {@link #DEFAULT_NUM_DOCUMENTS} documents. */
    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() throws Exception {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        client = this.getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdContainer = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdContainer);
        createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
    }

    /** Verifies {@link Configs#isQueryPlanCachingEnabled()} tracks the system property at read time. */
    @Test(groups = {"unit"}, priority = 1)
    public void queryPlanCacheEnabledFlag() {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();

        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        assertThat(Configs.isQueryPlanCachingEnabled()).isTrue();

        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
    }

    /**
     * Queries documents page-by-page and in one full page, then compares both
     * result sets against the inserted documents ordered by {@code prop}.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQuery() {
        String query = "select * from c order by c.prop ASC";
        queryWithOrderByAndAssert(
            DEFAULT_PAGE_SIZE,
            DEFAULT_NUM_DOCUMENTS,
            query,
            createdContainer,
            d -> d.getProp(),
            createdDocuments);
    }

    /** Validates ORDER BY on a constant property across two partitions of a dedicated high-throughput container. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQueryForLargeCollection() {
        CosmosContainerProperties containerProperties = getCollectionDefinition();
        createdDatabase.createContainer(
            containerProperties,
            ThroughputProperties.createManualThroughput(100000),
            new CosmosContainerRequestOptions()
        ).block();
        CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId());

        int partitionDocCount = 5;
        // Page size exceeds a single partition's count so pages span partitions.
        int pageSize = partitionDocCount + 1;
        String partition1Key = UUID.randomUUID().toString();
        String partition2Key = UUID.randomUUID().toString();

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition1Key),
            container));
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition2Key),
            container));

        String query = String.format(
            "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC",
            partition1Key,
            partition2Key);
        queryWithOrderByAndAssert(
            pageSize,
            partitionDocCount * 2,
            query,
            container,
            d -> d.getConstantProp(),
            documentsInserted);
    }

    /** Every query overload must tolerate {@code null} request options. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryOptionNullValidation() {
        String query = "Select top 1 * from c";

        FeedResponse<CosmosDatabaseProperties> databases =
            client.queryDatabases(query, null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);
        databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosContainerProperties> containers =
            createdDatabase.readAllContainers(null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1);
        containers = createdDatabase.queryContainers(query, null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);
        containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);

        CosmosUserProperties userProperties = new CosmosUserProperties();
        userProperties.setId(UUID.randomUUID().toString());
        createdDatabase.createUser(userProperties).block();
        FeedResponse<CosmosUserProperties> users =
            createdDatabase.queryUsers(query, null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);
        users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);

        CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties();
        cosmosPermissionProperties.setContainerName(createdContainer.getId());
        cosmosPermissionProperties.setPermissionMode(PermissionMode.READ);
        cosmosPermissionProperties.setId(UUID.randomUUID().toString());
        createdDatabase.getUser(userProperties.getId())
            .createPermission(cosmosPermissionProperties, null).block();
        FeedResponse<CosmosPermissionProperties> permissions =
            createdDatabase.getUser(userProperties.getId())
                .queryPermissions(query, null).byPage(1).blockFirst();
        assertThat(permissions.getResults().size()).isEqualTo(1);

        FeedResponse<TestObject> items =
            createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);
        items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class)
            .byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);

        createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block();
        createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block();
        createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block();

        FeedResponse<CosmosStoredProcedureProperties> sprocs =
            createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        sprocs = createdContainer.getScripts()
            .queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosTriggerProperties> triggers =
            createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        triggers = createdContainer.getScripts()
            .queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosUserDefinedFunctionProperties> udfs =
            createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);
        udfs = createdContainer.getScripts()
            .queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);

        // NOTE(review): the conflict query is expected to be rejected with BADREQUEST
        // (presumably because TOP is unsupported there); if no exception is thrown the
        // try-block passes silently — confirm whether a fail-on-no-exception is wanted.
        try {
            createdContainer.queryConflicts(query, null).byPage(1).blockFirst();
        } catch (CosmosException exception) {
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        }
        createdContainer.readAllConflicts(null).byPage(1).blockFirst();
    }

    /** Supplies one plain and one ORDER BY query for the plan-cache correctness test. */
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c "},
            new Object[] { "select * from c order by c.prop ASC"},
        };
    }

    /** The second execution of a cached single-partition query must return the same results. */
    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionCorrectness(String query) {
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));

        // First run populates the query-plan cache ...
        List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue();

        // ... second run hits the cache and must match.
        List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).containsExactlyElementsOf(ids2);
    }

    /** Parameterized queries share a cached plan but must still honor the current parameter values. */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() {
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec();
        sqlQuerySpec.setQueryText("select * from c where c.id = @id");
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        List<TestObject> pk2Docs = this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer);
        documentsInserted.addAll(pk2Docs);

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));

        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());

        // Same query text, different parameter value: cache hit, different result.
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue();
        List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).doesNotContainAnyElementsOf(ids2);

        // Queries with a parameterized TOP must bypass the plan cache entirely.
        sqlQuerySpec.setQueryText("select top @top * from c");
        int topValue = 2;
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", topValue)));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
    }

    /** Reads all partition key ranges of the given container via the low-level client. */
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    /** Runs the query to completion (bounded by {@code TIMEOUT}) and returns all results. */
    private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) {
        CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type);
        TestSubscriber<T> testSubscriber = new TestSubscriber<>();
        queryPagedFlux.subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        return testSubscriber.values();
    }

    /** Drains the query page-by-page, resuming each page from the previous continuation token. */
    private <T> List<T> queryWithContinuationTokens(String query, int pageSize,
                                                   CosmosAsyncContainer container, Class<T> klass) {
        logger.info("querying: {}", query);
        String requestContinuation = null;
        List<T> receivedDocuments = new ArrayList<>();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        do {
            CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass);
            FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst();
            assert firstPage != null;
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    /** Builds a document with a random name/prop so ORDER BY has varied values. */
    private TestObject getDocumentDefinition(String documentId, String partitionKey) {
        int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
        TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
        return doc;
    }

    /** Sorts the documents by the extracted property and returns their ids in that order. */
    private <T> List<String> sortTestObjectsAndCollectIds(
        List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> d.getId()).collect(Collectors.toList());
    }

    /**
     * Bulk-inserts {@code documentCount} documents; partition keys are drawn randomly
     * from {@code partitionKeys}, or freshly generated when the list is {@code null}.
     */
    private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys,
                                             CosmosAsyncContainer container) {
        List<TestObject> documentsToInsert = new ArrayList<>();
        for (int i = 0; i < documentCount; i++) {
            documentsToInsert.add(
                getDocumentDefinition(
                    UUID.randomUUID().toString(),
                    partitionKeys == null
                        ? UUID.randomUUID().toString()
                        : partitionKeys.get(random.nextInt(partitionKeys.size()))));
        }
        List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
        waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
        return documentInserted;
    }

    /** Runs the query paged and unpaged, asserting both match the expected sorted ids. */
    private <T extends Comparable<T>> void queryWithOrderByAndAssert(
        int pageSize, int documentCount, String query, CosmosAsyncContainer container,
        Function<TestObject, T> extractProp, List<TestObject> documentsInserted) {
        List<TestObject> documentsPaged =
            queryWithContinuationTokens(query, pageSize, container, TestObject.class);
        List<TestObject> allDocuments =
            queryWithContinuationTokens(query, documentCount, container, TestObject.class);
        Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder());
        List<String> expectedResourceIds =
            sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator);
        List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList());
        List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds);
        assertThat(docIds1).containsExactlyElementsOf(docIds2);
    }

    private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() {
        CosmosUserDefinedFunctionProperties udf =
            new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return udf;
    }

    private static CosmosTriggerProperties getCosmosTriggerProperties() {
        CosmosTriggerProperties trigger = new CosmosTriggerProperties(UUID.randomUUID().toString(),
            "function() {var " + "x = 10;}");
        trigger.setTriggerOperation(TriggerOperation.CREATE);
        trigger.setTriggerType(TriggerType.PRE);
        return trigger;
    }

    private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() {
        CosmosStoredProcedureProperties storedProcedureDef =
            new CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return storedProcedureDef;
    }

    /** Simple POJO used as the test document. {@code constantProp} is identical on every document. */
    static class TestObject {
        String id;
        String name;
        int prop;
        String mypk;
        String constantProp = "constantProp";

        public TestObject() {
        }

        public TestObject(String id, String name, int prop, String mypk) {
            this.id = id;
            this.name = name;
            this.prop = prop;
            this.mypk = mypk;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getProp() {
            return prop;
        }

        public void setProp(final int prop) {
            this.prop = prop;
        }

        public String getMypk() {
            return mypk;
        }

        public void setMypk(String mypk) {
            this.mypk = mypk;
        }

        public String getConstantProp() {
            return constantProp;
        }
    }
}
> }

nit: add a blank line here.
/**
 * Verifies that both a plain query and an ORDER BY query can be resumed from
 * continuation tokens issued BEFORE a partition split, and that the resumed
 * results are complete (plain query) and correctly ordered (ORDER BY query).
 *
 * Flow: create a dedicated container, insert documents across two partition
 * keys, take the first page of each query, force a split by scaling the
 * container to 16k RU manual throughput, poll until the split completes, then
 * drain the remaining pages from the pre-split continuation tokens.
 *
 * @throws Exception if interrupted while waiting for the split to complete
 *                   (propagated so TestNG fails the test instead of the
 *                   interruption being silently swallowed)
 */
public void splitQueryContinuationToken() throws Exception {
    String containerId = "splittestcontainer_" + UUID.randomUUID();
    int itemCount = 20;
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    createdDatabase.createContainer(containerProperties).block();
    CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);
    List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container);
    // Expected ORDER BY result: ids sorted by the "prop" field.
    List<String> sortedObjects = testObjects.stream()
        .sorted(Comparator.comparing(TestObject::getProp))
        .map(TestObject::getId)
        .collect(Collectors.toList());

    String query = "Select * from c";
    String orderByQuery = "select * from c order by c.prop";
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient);
    String requestContinuation = null;
    String orderByRequestContinuation = null;
    int preferredPageSize = 15;
    ArrayList<TestObject> resultList = new ArrayList<>();
    ArrayList<TestObject> orderByResultList = new ArrayList<>();

    // Take the first page of each query so the continuation tokens we hold
    // were issued against the pre-split partition layout.
    FeedResponse<TestObject> jsonNodeFeedResponse = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert jsonNodeFeedResponse != null;
    resultList.addAll(jsonNodeFeedResponse.getResults());
    requestContinuation = jsonNodeFeedResponse.getContinuationToken();

    FeedResponse<TestObject> orderByFeedResponse = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert orderByFeedResponse != null;
    orderByResultList.addAll(orderByFeedResponse.getResults());
    orderByRequestContinuation = orderByFeedResponse.getContinuationToken();

    logger.info("Scaling up throughput for split");
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());

    // Poll until the throughput replace (and the partition split it triggers)
    // has completed. InterruptedException propagates via "throws Exception"
    // instead of being caught and swallowed with printStackTrace().
    throughputResponse = container.readThroughput().block();
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        Thread.sleep(10 * 1000);
        throughputResponse = container.readThroughput().block();
    }

    logger.info("Resuming query from the continuation");
    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient);
    assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
        .as("Partition ranges should increase after split");
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
    container.readItem(testObjects.get(0).getId(),
        new PartitionKey(testObjects.get(0).getMypk()),
        JsonNode.class).block();

    // Drain the remaining pages of the plain query from the pre-split token.
    Flux<FeedResponse<TestObject>> feedResponseFlux = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(requestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) {
        resultList.addAll(nodeFeedResponse.getResults());
    }

    // Drain the remaining pages of the ORDER BY query from its pre-split token.
    Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(orderByRequestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) {
        orderByResultList.addAll(nodeFeedResponse.getResults());
    }

    List<String> sourceIds = testObjects.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> resultIds = resultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> orderResultIds = orderByResultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds)
        .as("Resuming query from continuation token after split validated");
    assertThat(orderResultIds).containsExactlyElementsOf(sortedObjects)
        .as("Resuming orderby query from continuation token after split validated");
    container.delete().block();
}
}
// Post-fix variant of the split/continuation test: InterruptedException now
// propagates via "throws Exception" rather than being swallowed.
public void splitQueryContinuationToken() throws Exception {
    String containerId = "splittestcontainer_" + UUID.randomUUID();
    int itemCount = 20;
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    CosmosContainerResponse containerResponse = createdDatabase.createContainer(containerProperties).block();
    CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);
    List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container);
    // Expected ORDER BY result: ids sorted by the "prop" field.
    List<String> sortedObjects = testObjects.stream()
        .sorted(Comparator.comparing(TestObject::getProp))
        .map(TestObject::getId)
        .collect(Collectors.toList());
    String query = "Select * from c";
    String orderByQuery = "select * from c order by c.prop";
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient);
    String requestContinuation = null;
    String orderByRequestContinuation = null;
    int preferredPageSize = 15;
    ArrayList<TestObject> resultList = new ArrayList<>();
    ArrayList<TestObject> orderByResultList = new ArrayList<>();
    // First page of each query: the continuation tokens captured here were
    // issued against the pre-split partition layout.
    FeedResponse<TestObject> jsonNodeFeedResponse = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert jsonNodeFeedResponse != null;
    resultList.addAll(jsonNodeFeedResponse.getResults());
    requestContinuation = jsonNodeFeedResponse.getContinuationToken();
    FeedResponse<TestObject> orderByFeedResponse = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert orderByFeedResponse != null;
    orderByResultList.addAll(orderByFeedResponse.getResults());
    orderByRequestContinuation = orderByFeedResponse.getContinuationToken();
    logger.info("Scaling up throughput for split");
    // Raising manual throughput to 16k RU forces the service to split the
    // container's physical partitions.
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());
    throughputResponse = container.readThroughput().block();
    // Poll until the replace (and split) finishes; an interruption now fails
    // the test via the method's throws clause.
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        Thread.sleep(10 * 1000);
        throughputResponse = container.readThroughput().block();
    }
    logger.info("Resuming query from the continuation");
    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient);
    assertThat(partitionKeyRangesAfterSplit.size()).isGreaterThan(partitionKeyRanges.size())
        .as("Partition ranges should increase after split");
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());
    container.readItem(testObjects.get(0).getId(),
        new PartitionKey(testObjects.get(0).getMypk()),
        JsonNode.class).block();
    // Drain the remaining pages of the plain query from the pre-split token.
    Flux<FeedResponse<TestObject>> feedResponseFlux = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(requestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) {
        resultList.addAll(nodeFeedResponse.getResults());
    }
    // Drain the remaining pages of the ORDER BY query from its pre-split token.
    Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(orderByRequestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) {
        orderByResultList.addAll(nodeFeedResponse.getResults());
    }
    List<String> sourceIds = testObjects.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> resultIds = resultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> orderResultIds = orderByResultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    assertThat(resultIds).containsExactlyInAnyOrderElementsOf(sourceIds)
        .as("Resuming query from continuation token after split validated");
    assertThat(orderResultIds).containsExactlyElementsOf(sortedObjects)
        .as("Resuming orderby query from continuation token after split validated");
    container.delete().block();
}
/**
 * Integration tests for query correctness against Cosmos DB: ORDER BY paging,
 * null request-options handling across resource types, and query-plan caching
 * for single-partition queries.
 *
 * Fixes in this revision:
 * - Removed a dangling {@code @Test(groups = {"simple"}, timeOut = TIMEOUT * 10)}
 *   annotation that was left above the private helper
 *   {@link #getPartitionKeyRanges} (TestNG ignores private methods, so the
 *   annotation was dead and misleading).
 * - Removed unused locals ({@code cosmosClientBuilder}, {@code topValue}).
 */
class QueryValidationTests extends TestSuiteBase {
    private static final int DEFAULT_NUM_DOCUMENTS = 1000;
    private static final int DEFAULT_PAGE_SIZE = 100;
    private CosmosAsyncDatabase createdDatabase;
    private CosmosAsyncContainer createdContainer;
    private Random random;
    private CosmosAsyncClient client;
    private List<TestObject> createdDocuments = new ArrayList<>();

    @Factory(dataProvider = "clientBuildersWithDirectSession")
    public QueryValidationTests(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        random = new Random();
    }

    /**
     * Enables query-plan caching, builds the client, and seeds the shared
     * multi-partition container with {@value #DEFAULT_NUM_DOCUMENTS} documents.
     */
    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() throws Exception {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        client = this.getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdContainer = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdContainer);
        createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
    }

    /**
     * Verifies that Configs.isQueryPlanCachingEnabled() tracks the
     * COSMOS.QUERYPLAN_CACHING_ENABLED system property.
     */
    @Test(groups = {"unit"}, priority = 1)
    public void queryPlanCacheEnabledFlag() {
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        assertThat(Configs.isQueryPlanCachingEnabled()).isTrue();
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
    }

    /**
     * Queries documents in pages, then with a page size equal to the document
     * count, and checks both produce the same, correctly ordered results.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQuery() {
        String query = "select * from c order by c.prop ASC";
        queryWithOrderByAndAssert(
            DEFAULT_PAGE_SIZE,
            DEFAULT_NUM_DOCUMENTS,
            query,
            createdContainer,
            d -> d.getProp(),
            createdDocuments);
    }

    /**
     * Same ORDER BY validation against a freshly created 100k-RU container
     * (many physical partitions) using an IN filter over two partition keys.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQueryForLargeCollection() {
        CosmosContainerProperties containerProperties = getCollectionDefinition();
        createdDatabase.createContainer(
            containerProperties,
            ThroughputProperties.createManualThroughput(100000),
            new CosmosContainerRequestOptions()
        ).block();
        CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId());
        int partitionDocCount = 5;
        // Page size exceeds per-partition count so pages span partitions.
        int pageSize = partitionDocCount + 1;
        String partition1Key = UUID.randomUUID().toString();
        String partition2Key = UUID.randomUUID().toString();
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition1Key),
            container));
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition2Key),
            container));
        String query = String.format(
            "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC",
            partition1Key,
            partition2Key);
        queryWithOrderByAndAssert(
            pageSize,
            partitionDocCount * 2,
            query,
            container,
            d -> d.getConstantProp(),
            documentsInserted);
    }

    /**
     * Passing null request options to every query/read API (databases,
     * containers, users, permissions, items, scripts, conflicts) must behave
     * like default options rather than throwing.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryOptionNullValidation() {
        String query = "Select top 1 * from c";
        FeedResponse<CosmosDatabaseProperties> databases =
            client.queryDatabases(query, null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);
        databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosContainerProperties> containers =
            createdDatabase.readAllContainers(null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1);
        containers = createdDatabase.queryContainers(query, null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);
        containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);

        CosmosUserProperties userProperties = new CosmosUserProperties();
        userProperties.setId(UUID.randomUUID().toString());
        createdDatabase.createUser(userProperties).block();
        FeedResponse<CosmosUserProperties> users =
            createdDatabase.queryUsers(query, null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);
        users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);

        CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties();
        cosmosPermissionProperties.setContainerName(createdContainer.getId());
        cosmosPermissionProperties.setPermissionMode(PermissionMode.READ);
        cosmosPermissionProperties.setId(UUID.randomUUID().toString());
        createdDatabase.getUser(userProperties.getId())
            .createPermission(cosmosPermissionProperties, null).block();
        FeedResponse<CosmosPermissionProperties> permissions =
            createdDatabase.getUser(userProperties.getId())
                .queryPermissions(query, null).byPage(1).blockFirst();
        assertThat(permissions.getResults().size()).isEqualTo(1);

        FeedResponse<TestObject> items =
            createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);
        items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class)
            .byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);

        createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block();
        createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block();
        createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block();
        FeedResponse<CosmosStoredProcedureProperties> sprocs =
            createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        sprocs = createdContainer.getScripts()
            .queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        FeedResponse<CosmosTriggerProperties> triggers =
            createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        triggers = createdContainer.getScripts()
            .queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        FeedResponse<CosmosUserDefinedFunctionProperties> udfs =
            createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);
        udfs = createdContainer.getScripts()
            .queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);

        // Conflict queries with a SQL filter are rejected by the service;
        // only the BADREQUEST path is asserted here.
        try {
            createdContainer.queryConflicts(query, null).byPage(1).blockFirst();
        } catch (CosmosException exception) {
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        }
        createdContainer.readAllConflicts(null).byPage(1).blockFirst();
    }

    // Queries exercised by queryPlanCacheSinglePartitionCorrectness.
    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c "},
            new Object[] { "select * from c order by c.prop ASC"},
        };
    }

    /**
     * A single-partition query must be returned from the query-plan cache on
     * the second execution with identical results.
     */
    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionCorrectness(String query) {
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer));
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));
        List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        // The first execution should have populated the plan cache.
        assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue();
        List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).containsExactlyElementsOf(ids2);
    }

    /**
     * Parameterized value queries are plan-cached (keyed by query text), but
     * queries parameterizing TOP must not be cached.
     */
    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() {
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec();
        sqlQuerySpec.setQueryText("select * from c where c.id = @id");
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        List<TestObject> pk2Docs = this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer);
        documentsInserted.addAll(pk2Docs);
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        // Same query text with a different parameter value: plan must be cached.
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue();
        List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).doesNotContainAnyElementsOf(ids2);
        // Parameterized TOP queries are excluded from plan caching.
        sqlQuerySpec.setQueryText("select top @top * from c");
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", 2)));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
    }

    // Reads all partition key ranges for the given container id.
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    // Runs the query on the shared container and collects all emitted items.
    private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) {
        CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type);
        TestSubscriber<T> testSubscriber = new TestSubscriber<>();
        queryPagedFlux.subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        return testSubscriber.values();
    }

    // Drains the query page by page, following continuation tokens to the end.
    private <T> List<T> queryWithContinuationTokens(String query, int pageSize,
                                                    CosmosAsyncContainer container, Class<T> klass) {
        logger.info("querying: " + query);
        String requestContinuation = null;
        List<T> receivedDocuments = new ArrayList<>();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        do {
            CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass);
            FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst();
            assert firstPage != null;
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    // Builds a TestObject with a random name/prop value.
    private TestObject getDocumentDefinition(String documentId, String partitionKey) {
        int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
        TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
        return doc;
    }

    // Sorts documents by the extracted property and returns their ids in order.
    private <T> List<String> sortTestObjectsAndCollectIds(
        List<TestObject> createdDocuments,
        Function<TestObject, T> extractProp,
        Comparator<T> comparer) {
        return createdDocuments.stream()
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> d.getId()).collect(Collectors.toList());
    }

    // Bulk-inserts documentCount documents; partitionKeys == null means a
    // random partition key per document, otherwise keys are picked randomly
    // from the supplied list.
    private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys,
                                             CosmosAsyncContainer container) {
        List<TestObject> documentsToInsert = new ArrayList<>();
        for (int i = 0; i < documentCount; i++) {
            documentsToInsert.add(
                getDocumentDefinition(
                    UUID.randomUUID().toString(),
                    partitionKeys == null
                        ? UUID.randomUUID().toString()
                        : partitionKeys.get(random.nextInt(partitionKeys.size()))));
        }
        List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
        waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
        return documentInserted;
    }

    // Runs the query both paged and in one large page; both runs must yield
    // the same ids, and they must match the expected sorted order.
    private <T extends Comparable<T>> void queryWithOrderByAndAssert(
        int pageSize,
        int documentCount,
        String query,
        CosmosAsyncContainer container,
        Function<TestObject, T> extractProp,
        List<TestObject> documentsInserted) {
        List<TestObject> documentsPaged =
            queryWithContinuationTokens(query, pageSize, container, TestObject.class);
        List<TestObject> allDocuments =
            queryWithContinuationTokens(query, documentCount, container, TestObject.class);
        Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder());
        List<String> expectedResourceIds =
            sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator);
        List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList());
        List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds);
        assertThat(docIds1).containsExactlyElementsOf(docIds2);
    }

    private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() {
        CosmosUserDefinedFunctionProperties udf =
            new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return udf;
    }

    private static CosmosTriggerProperties getCosmosTriggerProperties() {
        CosmosTriggerProperties trigger =
            new CosmosTriggerProperties(UUID.randomUUID().toString(), "function() {var " + "x = 10;}");
        trigger.setTriggerOperation(TriggerOperation.CREATE);
        trigger.setTriggerType(TriggerType.PRE);
        return trigger;
    }

    private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() {
        CosmosStoredProcedureProperties storedProcedureDef =
            new CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return storedProcedureDef;
    }

    // Simple POJO used as the test document type.
    static class TestObject {
        String id;
        String name;
        int prop;
        String mypk;
        // Same value for every document, used to exercise ORDER BY on a
        // constant property.
        String constantProp = "constantProp";

        public TestObject() {
        }

        public TestObject(String id, String name, int prop, String mypk) {
            this.id = id;
            this.name = name;
            this.prop = prop;
            this.mypk = mypk;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getProp() {
            return prop;
        }

        public void setProp(final int prop) {
            this.prop = prop;
        }

        public String getMypk() {
            return mypk;
        }

        public void setMypk(String mypk) {
            this.mypk = mypk;
        }

        public String getConstantProp() {
            return constantProp;
        }
    }
}
/**
 * Integration tests validating Cosmos query behavior: ORDER BY paging via
 * continuation tokens, tolerance of {@code null} request options across all
 * queryable resource types, and query-plan cache correctness for
 * single-partition (parameterized and non-parameterized) queries.
 */
class QueryValidationTests extends TestSuiteBase {
    private static final int DEFAULT_NUM_DOCUMENTS = 1000;
    private static final int DEFAULT_PAGE_SIZE = 100;

    private CosmosAsyncDatabase createdDatabase;
    private CosmosAsyncContainer createdContainer;
    private Random random;
    private CosmosAsyncClient client;
    private List<TestObject> createdDocuments = new ArrayList<>();

    @Factory(dataProvider = "clientBuildersWithDirectSession")
    public QueryValidationTests(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
        random = new Random();
    }

    @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT)
    public void beforeClass() throws Exception {
        // Enable query-plan caching before building the client so the cache
        // correctness tests below actually exercise the cache.
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        client = this.getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
        createdContainer = getSharedMultiPartitionCosmosContainer(client);
        truncateCollection(createdContainer);
        createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer));
    }

    @Test(groups = {"unit"}, priority = 1)
    public void queryPlanCacheEnabledFlag() {
        // Configs reads the system property on each call; toggle it and
        // verify every read. (An unused CosmosClientBuilder instantiation
        // was removed here — it played no part in the assertions.)
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true");
        assertThat(Configs.isQueryPlanCachingEnabled()).isTrue();
        System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false");
        assertThat(Configs.isQueryPlanCachingEnabled()).isFalse();
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQuery() {
        /*
         * Query the documents in pages, then query all of them with the page
         * size equal to the document count, and verify both traversals return
         * identical, correctly ordered results.
         */
        String query = "select * from c order by c.prop ASC";
        queryWithOrderByAndAssert(
            DEFAULT_PAGE_SIZE,
            DEFAULT_NUM_DOCUMENTS,
            query,
            createdContainer,
            d -> d.getProp(),
            createdDocuments);
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void orderByQueryForLargeCollection() {
        // High manual throughput forces the new container to span multiple
        // physical partitions.
        CosmosContainerProperties containerProperties = getCollectionDefinition();
        createdDatabase.createContainer(
            containerProperties,
            ThroughputProperties.createManualThroughput(100000),
            new CosmosContainerRequestOptions()
        ).block();
        CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId());

        int partitionDocCount = 5;
        // A page larger than one partition's documents forces cross-partition merging.
        int pageSize = partitionDocCount + 1;
        String partition1Key = UUID.randomUUID().toString();
        String partition2Key = UUID.randomUUID().toString();
        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition1Key),
            container));
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(partition2Key),
            container));

        String query = String.format(
            "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC",
            partition1Key,
            partition2Key);
        queryWithOrderByAndAssert(
            pageSize,
            partitionDocCount * 2,
            query,
            container,
            d -> d.getConstantProp(),
            documentsInserted);
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryOptionNullValidation() {
        // Every query/readAll overload must tolerate null request options.
        String query = "Select top 1 * from c";

        FeedResponse<CosmosDatabaseProperties> databases =
            client.queryDatabases(query, null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);
        databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(databases.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosContainerProperties> containers =
            createdDatabase.readAllContainers(null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1);
        containers = createdDatabase.queryContainers(query, null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);
        containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(containers.getResults().size()).isEqualTo(1);

        CosmosUserProperties userProperties = new CosmosUserProperties();
        userProperties.setId(UUID.randomUUID().toString());
        createdDatabase.createUser(userProperties).block();
        FeedResponse<CosmosUserProperties> users =
            createdDatabase.queryUsers(query, null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);
        users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(users.getResults().size()).isEqualTo(1);

        CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties();
        cosmosPermissionProperties.setContainerName(createdContainer.getId());
        cosmosPermissionProperties.setPermissionMode(PermissionMode.READ);
        cosmosPermissionProperties.setId(UUID.randomUUID().toString());
        createdDatabase.getUser(userProperties.getId()).createPermission(cosmosPermissionProperties, null).block();
        FeedResponse<CosmosPermissionProperties> permissions =
            createdDatabase.getUser(userProperties.getId()).queryPermissions(query, null).byPage(1).blockFirst();
        assertThat(permissions.getResults().size()).isEqualTo(1);

        FeedResponse<TestObject> items =
            createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);
        items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class).byPage(1).blockFirst();
        assertThat(items.getResults().size()).isEqualTo(1);

        createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block();
        createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block();
        createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block();

        FeedResponse<CosmosStoredProcedureProperties> sprocs =
            createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);
        sprocs = createdContainer.getScripts().queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(sprocs.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosTriggerProperties> triggers =
            createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);
        triggers = createdContainer.getScripts().queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(triggers.getResults().size()).isEqualTo(1);

        FeedResponse<CosmosUserDefinedFunctionProperties> udfs =
            createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);
        udfs = createdContainer.getScripts().queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst();
        assertThat(udfs.getResults().size()).isEqualTo(1);

        try {
            // TOP queries over conflicts are rejected by the service with 400.
            createdContainer.queryConflicts(query, null).byPage(1).blockFirst();
        } catch (CosmosException exception) {
            assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST);
        }
        createdContainer.readAllConflicts(null).byPage(1).blockFirst();
    }

    @DataProvider(name = "query")
    private Object[][] query() {
        return new Object[][]{
            new Object[] { "Select * from c "},
            new Object[] { "select * from c order by c.prop ASC"},
        };
    }

    @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionCorrectness(String query) {
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer));

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));
        List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());
        // The first single-partition execution should have populated the plan cache.
        assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue();
        // A second run, served from the cached plan, must return the same results.
        List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(ids1).containsExactlyElementsOf(ids2);
    }

    @Test(groups = {"simple"}, timeOut = TIMEOUT)
    public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() {
        SqlQuerySpec sqlQuerySpec = new SqlQuerySpec();
        sqlQuerySpec.setQueryText("select * from c where c.id = @id");
        String pk1 = "pk1";
        String pk2 = "pk2";
        int partitionDocCount = 5;

        List<TestObject> documentsInserted = new ArrayList<>();
        documentsInserted.addAll(this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk1),
            createdContainer));
        AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer);
        List<TestObject> pk2Docs = this.insertDocuments(
            partitionDocCount,
            Collections.singletonList(pk2),
            createdContainer);
        documentsInserted.addAll(pk2Docs);

        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setPartitionKey(new PartitionKey(pk2));
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList());

        // Same query text with a different parameter value now hits the cached plan...
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId())));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue();
        List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList());
        // ...but must still honor the new parameter and return different documents.
        assertThat(ids1).doesNotContainAnyElementsOf(ids2);

        // Queries parameterized on TOP are not eligible for plan caching:
        // the cache must stay cold before and after execution.
        sqlQuerySpec.setQueryText("select top @top * from c");
        int topValue = 2;
        sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", topValue)));
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
        values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class);
        assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse();
    }

    // Reads all partition key ranges of the given container.
    // NOTE(review): a leftover @Test(groups = {"simple"}, timeOut = TIMEOUT * 10)
    // annotation was removed from this private helper — TestNG test methods must
    // be public, so the annotation was a dangling remnant of a relocated test.
    @NotNull
    private List<PartitionKeyRange> getPartitionKeyRanges(
        String containerId, AsyncDocumentClient asyncDocumentClient) {
        List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>();
        List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient
            .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId,
                new CosmosQueryRequestOptions())
            .collectList().block();
        partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults()));
        return partitionKeyRanges;
    }

    // Subscribes to the query flux and blocks (up to TIMEOUT ms) for all results.
    private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) {
        CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type);
        TestSubscriber<T> testSubscriber = new TestSubscriber<>();
        queryPagedFlux.subscribe(testSubscriber);
        testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS);
        return testSubscriber.values();
    }

    // Drains the query page by page, resuming each page from the previous
    // page's continuation token, until the token comes back null.
    private <T> List<T> queryWithContinuationTokens(String query, int pageSize,
                                                    CosmosAsyncContainer container, Class<T> klass) {
        logger.info("querying: " + query);
        String requestContinuation = null;
        List<T> receivedDocuments = new ArrayList<>();
        CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
        options.setMaxDegreeOfParallelism(2);
        do {
            CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass);
            FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst();
            assert firstPage != null;
            requestContinuation = firstPage.getContinuationToken();
            receivedDocuments.addAll(firstPage.getResults());
        } while (requestContinuation != null);
        return receivedDocuments;
    }

    // Builds a test document with a random name/prop in [0, DEFAULT_NUM_DOCUMENTS / 2).
    private TestObject getDocumentDefinition(String documentId, String partitionKey) {
        int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2);
        TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey);
        return doc;
    }

    // Sorts the documents by the extracted property and returns their ids in that order.
    private <T> List<String> sortTestObjectsAndCollectIds(
        List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) {
        return createdDocuments.stream()
            .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2)))
            .map(d -> d.getId()).collect(Collectors.toList());
    }

    // Bulk-inserts documentCount documents; partition keys are drawn randomly
    // from partitionKeys, or generated per-document when partitionKeys is null.
    private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) {
        List<TestObject> documentsToInsert = new ArrayList<>();
        for (int i = 0; i < documentCount; i++) {
            documentsToInsert.add(
                getDocumentDefinition(
                    UUID.randomUUID().toString(),
                    partitionKeys == null ?
                        UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size()))));
        }
        List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert);
        waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
        return documentInserted;
    }

    // Runs the query twice (paged and all-at-once) and asserts both traversals
    // match the expected order derived by sorting documentsInserted locally.
    private <T extends Comparable<T>> void queryWithOrderByAndAssert(
        int pageSize,
        int documentCount,
        String query,
        CosmosAsyncContainer container,
        Function<TestObject, T> extractProp,
        List<TestObject> documentsInserted) {
        List<TestObject> documentsPaged = queryWithContinuationTokens(query, pageSize, container, TestObject.class);
        List<TestObject> allDocuments = queryWithContinuationTokens(query, documentCount, container, TestObject.class);
        Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder());
        List<String> expectedResourceIds =
            sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator);

        List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList());
        List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList());
        assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds);
        assertThat(docIds1).containsExactlyElementsOf(docIds2);
    }

    private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() {
        CosmosUserDefinedFunctionProperties udf =
            new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return udf;
    }

    private static CosmosTriggerProperties getCosmosTriggerProperties() {
        CosmosTriggerProperties trigger =
            new CosmosTriggerProperties(UUID.randomUUID().toString(), "function() {var " + "x = 10;}");
        trigger.setTriggerOperation(TriggerOperation.CREATE);
        trigger.setTriggerType(TriggerType.PRE);
        return trigger;
    }

    private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() {
        CosmosStoredProcedureProperties storedProcedureDef =
            new CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}");
        return storedProcedureDef;
    }

    /**
     * POJO used as the document shape for these tests. {@code mypk} is the
     * partition key; {@code constantProp} is identical across documents to
     * exercise ORDER BY over a constant value.
     */
    static class TestObject {
        String id;
        String name;
        int prop;
        String mypk;
        String constantProp = "constantProp";

        public TestObject() {
        }

        public TestObject(String id, String name, int prop, String mypk) {
            this.id = id;
            this.name = name;
            this.prop = prop;
            this.mypk = mypk;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public int getProp() {
            return prop;
        }

        public void setProp(final int prop) {
            this.prop = prop;
        }

        public String getMypk() {
            return mypk;
        }

        public void setMypk(String mypk) {
            this.mypk = mypk;
        }

        public String getConstantProp() {
            return constantProp;
        }
    }
}
Done
/**
 * Validates that continuation tokens issued before a partition split can still
 * resume both a plain query and an ORDER BY query after the split completes,
 * without losing or reordering documents.
 */
public void splitQueryContinuationToken() {
    String containerId = "splittestcontainer_" + UUID.randomUUID();
    int itemCount = 20;
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    // Previously the response was captured into an unused local; the call's
    // side effect (container creation) is all that matters.
    createdDatabase.createContainer(containerProperties).block();
    CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);

    List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container);
    List<String> sortedObjects = testObjects.stream()
        .sorted(Comparator.comparing(TestObject::getProp))
        .map(TestObject::getId)
        .collect(Collectors.toList());

    String query = "Select * from c";
    String orderByQuery = "select * from c order by c.prop";
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient);

    String requestContinuation = null;
    String orderByRequestContinuation = null;
    int preferredPageSize = 15;
    ArrayList<TestObject> resultList = new ArrayList<>();
    ArrayList<TestObject> orderByResultList = new ArrayList<>();

    // Read the first page of each query now, so we hold continuation tokens
    // minted before the split.
    FeedResponse<TestObject> jsonNodeFeedResponse = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert jsonNodeFeedResponse != null;
    resultList.addAll(jsonNodeFeedResponse.getResults());
    requestContinuation = jsonNodeFeedResponse.getContinuationToken();

    FeedResponse<TestObject> orderByFeedResponse = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert orderByFeedResponse != null;
    orderByResultList.addAll(orderByFeedResponse.getResults());
    orderByRequestContinuation = orderByFeedResponse.getContinuationToken();

    logger.info("Scaling up throughput for split");
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());

    // Poll until the throughput replacement (and therefore the split) completes.
    throughputResponse = container.readThroughput().block();
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        try {
            Thread.sleep(10 * 1000);
        } catch (InterruptedException e) {
            // Restore the interrupt status and fail fast rather than silently
            // swallowing the interruption (the original only printed the trace).
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Interrupted while waiting for partition split", e);
        }
        throughputResponse = container.readThroughput().block();
    }

    logger.info("Resuming query from the continuation");
    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient);
    // AssertJ: .as(...) must precede the assertion to take effect; the original
    // appended it afterwards, where the description was silently ignored.
    assertThat(partitionKeyRangesAfterSplit.size())
        .as("Partition ranges should increase after split")
        .isGreaterThan(partitionKeyRanges.size());
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());

    // A point read nudges the client to refresh its routing information after the split.
    container.readItem(testObjects.get(0).getId(),
        new PartitionKey(testObjects.get(0).getMypk()),
        JsonNode.class).block();

    Flux<FeedResponse<TestObject>> feedResponseFlux = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(requestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) {
        resultList.addAll(nodeFeedResponse.getResults());
    }

    Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(orderByRequestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) {
        orderByResultList.addAll(nodeFeedResponse.getResults());
    }

    List<String> sourceIds = testObjects.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> resultIds = resultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> orderResultIds = orderByResultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    assertThat(resultIds)
        .as("Resuming query from continuation token after split validated")
        .containsExactlyInAnyOrderElementsOf(sourceIds);
    assertThat(orderResultIds)
        .as("Resuming orderby query from continuation token after split validated")
        .containsExactlyElementsOf(sortedObjects);
    container.delete().block();
}
}
/**
 * Validates that continuation tokens issued before a partition split can still
 * resume both a plain query and an ORDER BY query after the split completes,
 * without losing or reordering documents.
 *
 * @throws Exception if interrupted while polling for the split to finish
 */
public void splitQueryContinuationToken() throws Exception {
    String containerId = "splittestcontainer_" + UUID.randomUUID();
    int itemCount = 20;
    CosmosContainerProperties containerProperties = new CosmosContainerProperties(containerId, "/mypk");
    // Previously the response was captured into an unused local; the call's
    // side effect (container creation) is all that matters.
    createdDatabase.createContainer(containerProperties).block();
    CosmosAsyncContainer container = createdDatabase.getContainer(containerId);
    AsyncDocumentClient asyncDocumentClient = BridgeInternal.getContextClient(this.client);

    List<TestObject> testObjects = insertDocuments(itemCount, Arrays.asList("CA", "US"), container);
    List<String> sortedObjects = testObjects.stream()
        .sorted(Comparator.comparing(TestObject::getProp))
        .map(TestObject::getId)
        .collect(Collectors.toList());

    String query = "Select * from c";
    String orderByQuery = "select * from c order by c.prop";
    List<PartitionKeyRange> partitionKeyRanges = getPartitionKeyRanges(containerId, asyncDocumentClient);

    String requestContinuation = null;
    String orderByRequestContinuation = null;
    int preferredPageSize = 15;
    ArrayList<TestObject> resultList = new ArrayList<>();
    ArrayList<TestObject> orderByResultList = new ArrayList<>();

    // Read the first page of each query now, so we hold continuation tokens
    // minted before the split.
    FeedResponse<TestObject> jsonNodeFeedResponse = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert jsonNodeFeedResponse != null;
    resultList.addAll(jsonNodeFeedResponse.getResults());
    requestContinuation = jsonNodeFeedResponse.getContinuationToken();

    FeedResponse<TestObject> orderByFeedResponse = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(preferredPageSize).blockFirst();
    assert orderByFeedResponse != null;
    orderByResultList.addAll(orderByFeedResponse.getResults());
    orderByRequestContinuation = orderByFeedResponse.getContinuationToken();

    logger.info("Scaling up throughput for split");
    ThroughputProperties throughputProperties = ThroughputProperties.createManualThroughput(16000);
    ThroughputResponse throughputResponse = container.replaceThroughput(throughputProperties).block();
    logger.info("Throughput replace request submitted for {} ",
        throughputResponse.getProperties().getManualThroughput());

    // Poll until the throughput replacement (and therefore the split) completes.
    throughputResponse = container.readThroughput().block();
    while (true) {
        assert throughputResponse != null;
        if (!throughputResponse.isReplacePending()) {
            break;
        }
        logger.info("Waiting for split to complete");
        Thread.sleep(10 * 1000);
        throughputResponse = container.readThroughput().block();
    }

    logger.info("Resuming query from the continuation");
    List<PartitionKeyRange> partitionKeyRangesAfterSplit = getPartitionKeyRanges(containerId, asyncDocumentClient);
    // AssertJ: .as(...) must precede the assertion to take effect; the original
    // appended it afterwards, where the description was silently ignored.
    assertThat(partitionKeyRangesAfterSplit.size())
        .as("Partition ranges should increase after split")
        .isGreaterThan(partitionKeyRanges.size());
    logger.info("After split num partitions = {}", partitionKeyRangesAfterSplit.size());

    // A point read nudges the client to refresh its routing information after the split.
    container.readItem(testObjects.get(0).getId(),
        new PartitionKey(testObjects.get(0).getMypk()),
        JsonNode.class).block();

    Flux<FeedResponse<TestObject>> feedResponseFlux = container
        .queryItems(query, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(requestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : feedResponseFlux.toIterable()) {
        resultList.addAll(nodeFeedResponse.getResults());
    }

    Flux<FeedResponse<TestObject>> orderfeedResponseFlux = container
        .queryItems(orderByQuery, new CosmosQueryRequestOptions(), TestObject.class)
        .byPage(orderByRequestContinuation, preferredPageSize);
    for (FeedResponse<TestObject> nodeFeedResponse : orderfeedResponseFlux.toIterable()) {
        orderByResultList.addAll(nodeFeedResponse.getResults());
    }

    List<String> sourceIds = testObjects.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> resultIds = resultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    List<String> orderResultIds = orderByResultList.stream().map(obj -> obj.getId()).collect(Collectors.toList());
    assertThat(resultIds)
        .as("Resuming query from continuation token after split validated")
        .containsExactlyInAnyOrderElementsOf(sourceIds);
    assertThat(orderResultIds)
        .as("Resuming orderby query from continuation token after split validated")
        .containsExactlyElementsOf(sortedObjects);
    container.delete().block();
}
class QueryValidationTests extends TestSuiteBase { private static final int DEFAULT_NUM_DOCUMENTS = 1000; private static final int DEFAULT_PAGE_SIZE = 100; private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdContainer; private Random random; private CosmosAsyncClient client; private List<TestObject> createdDocuments = new ArrayList<>(); @Factory(dataProvider = "clientBuildersWithDirectSession") public QueryValidationTests(CosmosClientBuilder clientBuilder) { super(clientBuilder); random = new Random(); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true"); client = this.getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdContainer = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdContainer); createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer)); } @Test(groups = {"unit"}, priority = 1) public void queryPlanCacheEnabledFlag() { System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false"); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder(); assertThat(Configs.isQueryPlanCachingEnabled()).isFalse(); System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true"); assertThat(Configs.isQueryPlanCachingEnabled()).isTrue(); System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false"); assertThat(Configs.isQueryPlanCachingEnabled()).isFalse(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void orderByQuery() { /* The idea here is to query documents in pages, query all the documents(with pagesize as num_documents and compare the results. 
*/ String query = "select * from c order by c.prop ASC"; queryWithOrderByAndAssert( DEFAULT_PAGE_SIZE, DEFAULT_NUM_DOCUMENTS, query, createdContainer, d -> d.getProp(), createdDocuments); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void orderByQueryForLargeCollection() { CosmosContainerProperties containerProperties = getCollectionDefinition(); createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(100000), new CosmosContainerRequestOptions() ).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId()); int partitionDocCount = 5; int pageSize = partitionDocCount + 1; String partition1Key = UUID.randomUUID().toString(); String partition2Key = UUID.randomUUID().toString(); List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(partition1Key), container)); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(partition2Key), container)); String query = String.format( "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC", partition1Key, partition2Key); queryWithOrderByAndAssert( pageSize, partitionDocCount * 2, query, container, d -> d.getConstantProp(), documentsInserted); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryOptionNullValidation() { String query = "Select top 1 * from c"; FeedResponse<CosmosDatabaseProperties> databases = client.queryDatabases(query, null).byPage(1).blockFirst(); assertThat(databases.getResults().size()).isEqualTo(1); databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(databases.getResults().size()).isEqualTo(1); FeedResponse<CosmosContainerProperties> containers = createdDatabase.readAllContainers(null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1); containers = 
createdDatabase.queryContainers(query, null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isEqualTo(1); containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isEqualTo(1); CosmosUserProperties userProperties = new CosmosUserProperties(); userProperties.setId(UUID.randomUUID().toString()); createdDatabase.createUser(userProperties).block(); FeedResponse<CosmosUserProperties> users = createdDatabase.queryUsers(query, null).byPage(1).blockFirst(); assertThat(users.getResults().size()).isEqualTo(1); users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(users.getResults().size()).isEqualTo(1); CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties(); cosmosPermissionProperties.setContainerName(createdContainer.getId()); cosmosPermissionProperties.setPermissionMode(PermissionMode.READ); cosmosPermissionProperties.setId(UUID.randomUUID().toString()); createdDatabase.getUser(userProperties.getId()).createPermission(cosmosPermissionProperties, null).block(); FeedResponse<CosmosPermissionProperties> permissions = createdDatabase.getUser(userProperties.getId()).queryPermissions(query, null).byPage(1).blockFirst(); assertThat(permissions.getResults().size()).isEqualTo(1); FeedResponse<TestObject> items = createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst(); assertThat(items.getResults().size()).isEqualTo(1); items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class).byPage(1).blockFirst(); assertThat(items.getResults().size()).isEqualTo(1); createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block(); createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block(); createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block(); 
FeedResponse<CosmosStoredProcedureProperties> sprocs = createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst(); assertThat(sprocs.getResults().size()).isEqualTo(1); sprocs = createdContainer.getScripts().queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(sprocs.getResults().size()).isEqualTo(1); FeedResponse<CosmosTriggerProperties> triggers = createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst(); assertThat(triggers.getResults().size()).isEqualTo(1); triggers = createdContainer.getScripts().queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(triggers.getResults().size()).isEqualTo(1); FeedResponse<CosmosUserDefinedFunctionProperties> udfs = createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst(); assertThat(udfs.getResults().size()).isEqualTo(1); udfs = createdContainer.getScripts().queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(udfs.getResults().size()).isEqualTo(1); try { createdContainer.queryConflicts(query, null).byPage(1).blockFirst(); } catch (CosmosException exception) { assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); } createdContainer.readAllConflicts(null).byPage(1).blockFirst(); } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c "}, new Object[] { "select * from c order by c.prop ASC"}, }; } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryPlanCacheSinglePartitionCorrectness(String query) { String pk1 = "pk1"; String pk2 = "pk2"; int partitionDocCount = 5; List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk1), createdContainer)); AsyncDocumentClient contextClient = 
CosmosBridgeInternal.getContextClient(createdContainer); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk2), createdContainer)); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey(pk2)); List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class); List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue(); List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class); List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(ids1).containsExactlyElementsOf(ids2); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() { SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(); sqlQuerySpec.setQueryText("select * from c where c.id = @id"); String pk1 = "pk1"; String pk2 = "pk2"; int partitionDocCount = 5; List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk1), createdContainer)); AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer); List<TestObject> pk2Docs = this.insertDocuments( partitionDocCount, Collections.singletonList(pk2), createdContainer); documentsInserted.addAll(pk2Docs); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey(pk2)); sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId()))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); List<String> ids1 = 
values1.stream().map(TestObject::getId).collect(Collectors.toList()); sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId()))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue(); List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(ids1).doesNotContainAnyElementsOf(ids2); sqlQuerySpec.setQueryText("select top @top * from c"); int topValue = 2; sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", 2))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); } @Test(groups = {"simple"}, timeOut = TIMEOUT * 10) @NotNull private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) { CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type); TestSubscriber<T> testSubscriber = new TestSubscriber<>(); queryPagedFlux.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); return testSubscriber.values(); } private <T> List<T> queryWithContinuationTokens(String query, int pageSize, 
CosmosAsyncContainer container, Class<T> klass) { logger.info("querying: " + query); String requestContinuation = null; List<T> receivedDocuments = new ArrayList<>(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); do { CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass); FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst(); assert firstPage != null; requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); } while (requestContinuation != null); return receivedDocuments; } private TestObject getDocumentDefinition(String documentId, String partitionKey) { int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2); TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey); return doc; } private <T> List<String> sortTestObjectsAndCollectIds( List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> d.getId()).collect(Collectors.toList()); } private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) { List<TestObject> documentsToInsert = new ArrayList<>(); for (int i = 0; i < documentCount; i++) { documentsToInsert.add( getDocumentDefinition( UUID.randomUUID().toString(), partitionKeys == null ? 
UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size())))); } List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert); waitIfNeededForReplicasToCatchUp(this.getClientBuilder()); return documentInserted; } private <T extends Comparable<T>> void queryWithOrderByAndAssert( int pageSize, int documentCount, String query, CosmosAsyncContainer container, Function<TestObject, T> extractProp, List<TestObject> documentsInserted) { List<TestObject> documentsPaged = queryWithContinuationTokens(query, pageSize, container, TestObject.class); List<TestObject> allDocuments = queryWithContinuationTokens(query, documentCount, container, TestObject.class); Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder()); List<String> expectedResourceIds = sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator); List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList()); List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds); assertThat(docIds1).containsExactlyElementsOf(docIds2); } private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() { CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}"); return udf; } private static CosmosTriggerProperties getCosmosTriggerProperties() { CosmosTriggerProperties trigger = new CosmosTriggerProperties(UUID.randomUUID().toString(), "function() {var " + "x = 10;}"); trigger.setTriggerOperation(TriggerOperation.CREATE); trigger.setTriggerType(TriggerType.PRE); return trigger; } private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() { CosmosStoredProcedureProperties storedProcedureDef = new 
CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}"); return storedProcedureDef; } static class TestObject { String id; String name; int prop; String mypk; String constantProp = "constantProp"; public TestObject() { } public TestObject(String id, String name, int prop, String mypk) { this.id = id; this.name = name; this.prop = prop; this.mypk = mypk; } public String getId() { return id; } public void setId(String id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getProp() { return prop; } public void setProp(final int prop) { this.prop = prop; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public String getConstantProp() { return constantProp; } } }
class QueryValidationTests extends TestSuiteBase { private static final int DEFAULT_NUM_DOCUMENTS = 1000; private static final int DEFAULT_PAGE_SIZE = 100; private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdContainer; private Random random; private CosmosAsyncClient client; private List<TestObject> createdDocuments = new ArrayList<>(); @Factory(dataProvider = "clientBuildersWithDirectSession") public QueryValidationTests(CosmosClientBuilder clientBuilder) { super(clientBuilder); random = new Random(); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true"); client = this.getClientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdContainer = getSharedMultiPartitionCosmosContainer(client); truncateCollection(createdContainer); createdDocuments.addAll(this.insertDocuments(DEFAULT_NUM_DOCUMENTS, null, createdContainer)); } @Test(groups = {"unit"}, priority = 1) public void queryPlanCacheEnabledFlag() { System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false"); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder(); assertThat(Configs.isQueryPlanCachingEnabled()).isFalse(); System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "true"); assertThat(Configs.isQueryPlanCachingEnabled()).isTrue(); System.setProperty("COSMOS.QUERYPLAN_CACHING_ENABLED", "false"); assertThat(Configs.isQueryPlanCachingEnabled()).isFalse(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void orderByQuery() { /* The idea here is to query documents in pages, query all the documents(with pagesize as num_documents and compare the results. 
*/ String query = "select * from c order by c.prop ASC"; queryWithOrderByAndAssert( DEFAULT_PAGE_SIZE, DEFAULT_NUM_DOCUMENTS, query, createdContainer, d -> d.getProp(), createdDocuments); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void orderByQueryForLargeCollection() { CosmosContainerProperties containerProperties = getCollectionDefinition(); createdDatabase.createContainer( containerProperties, ThroughputProperties.createManualThroughput(100000), new CosmosContainerRequestOptions() ).block(); CosmosAsyncContainer container = createdDatabase.getContainer(containerProperties.getId()); int partitionDocCount = 5; int pageSize = partitionDocCount + 1; String partition1Key = UUID.randomUUID().toString(); String partition2Key = UUID.randomUUID().toString(); List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(partition1Key), container)); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(partition2Key), container)); String query = String.format( "select * from c where c.mypk in ('%s', '%s') order by c.constantProp DESC", partition1Key, partition2Key); queryWithOrderByAndAssert( pageSize, partitionDocCount * 2, query, container, d -> d.getConstantProp(), documentsInserted); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryOptionNullValidation() { String query = "Select top 1 * from c"; FeedResponse<CosmosDatabaseProperties> databases = client.queryDatabases(query, null).byPage(1).blockFirst(); assertThat(databases.getResults().size()).isEqualTo(1); databases = client.queryDatabases(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(databases.getResults().size()).isEqualTo(1); FeedResponse<CosmosContainerProperties> containers = createdDatabase.readAllContainers(null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isGreaterThanOrEqualTo(1); containers = 
createdDatabase.queryContainers(query, null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isEqualTo(1); containers = createdDatabase.queryContainers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(containers.getResults().size()).isEqualTo(1); CosmosUserProperties userProperties = new CosmosUserProperties(); userProperties.setId(UUID.randomUUID().toString()); createdDatabase.createUser(userProperties).block(); FeedResponse<CosmosUserProperties> users = createdDatabase.queryUsers(query, null).byPage(1).blockFirst(); assertThat(users.getResults().size()).isEqualTo(1); users = createdDatabase.queryUsers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(users.getResults().size()).isEqualTo(1); CosmosPermissionProperties cosmosPermissionProperties = new CosmosPermissionProperties(); cosmosPermissionProperties.setContainerName(createdContainer.getId()); cosmosPermissionProperties.setPermissionMode(PermissionMode.READ); cosmosPermissionProperties.setId(UUID.randomUUID().toString()); createdDatabase.getUser(userProperties.getId()).createPermission(cosmosPermissionProperties, null).block(); FeedResponse<CosmosPermissionProperties> permissions = createdDatabase.getUser(userProperties.getId()).queryPermissions(query, null).byPage(1).blockFirst(); assertThat(permissions.getResults().size()).isEqualTo(1); FeedResponse<TestObject> items = createdContainer.queryItems(query, null, TestObject.class).byPage(1).blockFirst(); assertThat(items.getResults().size()).isEqualTo(1); items = createdContainer.queryItems(new SqlQuerySpec(query), null, TestObject.class).byPage(1).blockFirst(); assertThat(items.getResults().size()).isEqualTo(1); createdContainer.getScripts().createStoredProcedure(getCosmosStoredProcedureProperties()).block(); createdContainer.getScripts().createTrigger(getCosmosTriggerProperties()).block(); createdContainer.getScripts().createUserDefinedFunction(getCosmosUserDefinedFunctionProperties()).block(); 
FeedResponse<CosmosStoredProcedureProperties> sprocs = createdContainer.getScripts().queryStoredProcedures(query, null).byPage(1).blockFirst(); assertThat(sprocs.getResults().size()).isEqualTo(1); sprocs = createdContainer.getScripts().queryStoredProcedures(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(sprocs.getResults().size()).isEqualTo(1); FeedResponse<CosmosTriggerProperties> triggers = createdContainer.getScripts().queryTriggers(query, null).byPage(1).blockFirst(); assertThat(triggers.getResults().size()).isEqualTo(1); triggers = createdContainer.getScripts().queryTriggers(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(triggers.getResults().size()).isEqualTo(1); FeedResponse<CosmosUserDefinedFunctionProperties> udfs = createdContainer.getScripts().queryUserDefinedFunctions(query, null).byPage(1).blockFirst(); assertThat(udfs.getResults().size()).isEqualTo(1); udfs = createdContainer.getScripts().queryUserDefinedFunctions(new SqlQuerySpec(query), null).byPage(1).blockFirst(); assertThat(udfs.getResults().size()).isEqualTo(1); try { createdContainer.queryConflicts(query, null).byPage(1).blockFirst(); } catch (CosmosException exception) { assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.BADREQUEST); } createdContainer.readAllConflicts(null).byPage(1).blockFirst(); } @DataProvider(name = "query") private Object[][] query() { return new Object[][]{ new Object[] { "Select * from c "}, new Object[] { "select * from c order by c.prop ASC"}, }; } @Test(groups = {"simple"}, dataProvider = "query", timeOut = TIMEOUT) public void queryPlanCacheSinglePartitionCorrectness(String query) { String pk1 = "pk1"; String pk2 = "pk2"; int partitionDocCount = 5; List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk1), createdContainer)); AsyncDocumentClient contextClient = 
CosmosBridgeInternal.getContextClient(createdContainer); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk2), createdContainer)); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey(pk2)); List<TestObject> values1 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class); List<String> ids1 = values1.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(contextClient.getQueryPlanCache().containsKey(query)).isTrue(); List<TestObject> values2 = queryAndGetResults(new SqlQuerySpec(query), options, TestObject.class); List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(ids1).containsExactlyElementsOf(ids2); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void queryPlanCacheSinglePartitionParameterizedQueriesCorrectness() { SqlQuerySpec sqlQuerySpec = new SqlQuerySpec(); sqlQuerySpec.setQueryText("select * from c where c.id = @id"); String pk1 = "pk1"; String pk2 = "pk2"; int partitionDocCount = 5; List<TestObject> documentsInserted = new ArrayList<>(); documentsInserted.addAll(this.insertDocuments( partitionDocCount, Collections.singletonList(pk1), createdContainer)); AsyncDocumentClient contextClient = CosmosBridgeInternal.getContextClient(createdContainer); List<TestObject> pk2Docs = this.insertDocuments( partitionDocCount, Collections.singletonList(pk2), createdContainer); documentsInserted.addAll(pk2Docs); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey(pk2)); sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(0).getId()))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); List<TestObject> values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); List<String> ids1 = 
values1.stream().map(TestObject::getId).collect(Collectors.toList()); sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@id", pk2Docs.get(1).getId()))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isTrue(); List<TestObject> values2 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); List<String> ids2 = values2.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(ids1).doesNotContainAnyElementsOf(ids2); sqlQuerySpec.setQueryText("select top @top * from c"); int topValue = 2; sqlQuerySpec.setParameters(Collections.singletonList(new SqlParameter("@top", 2))); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); values1 = queryAndGetResults(sqlQuerySpec, options, TestObject.class); assertThat(contextClient.getQueryPlanCache().containsKey(sqlQuerySpec.getQueryText())).isFalse(); } @Test(groups = {"simple"}, timeOut = TIMEOUT * 10) @NotNull private List<PartitionKeyRange> getPartitionKeyRanges( String containerId, AsyncDocumentClient asyncDocumentClient) { List<PartitionKeyRange> partitionKeyRanges = new ArrayList<>(); List<FeedResponse<PartitionKeyRange>> partitionFeedResponseList = asyncDocumentClient .readPartitionKeyRanges("/dbs/" + createdDatabase.getId() + "/colls/" + containerId, new CosmosQueryRequestOptions()) .collectList().block(); partitionFeedResponseList.forEach(f -> partitionKeyRanges.addAll(f.getResults())); return partitionKeyRanges; } private <T> List<T> queryAndGetResults(SqlQuerySpec querySpec, CosmosQueryRequestOptions options, Class<T> type) { CosmosPagedFlux<T> queryPagedFlux = createdContainer.queryItems(querySpec, options, type); TestSubscriber<T> testSubscriber = new TestSubscriber<>(); queryPagedFlux.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); return testSubscriber.values(); } private <T> List<T> queryWithContinuationTokens(String query, int pageSize, 
CosmosAsyncContainer container, Class<T> klass) { logger.info("querying: " + query); String requestContinuation = null; List<T> receivedDocuments = new ArrayList<>(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); options.setMaxDegreeOfParallelism(2); do { CosmosPagedFlux<T> queryPagedFlux = container.queryItems(query, options, klass); FeedResponse<T> firstPage = queryPagedFlux.byPage(requestContinuation, pageSize).blockFirst(); assert firstPage != null; requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); } while (requestContinuation != null); return receivedDocuments; } private TestObject getDocumentDefinition(String documentId, String partitionKey) { int randInt = random.nextInt(DEFAULT_NUM_DOCUMENTS / 2); TestObject doc = new TestObject(documentId, "name" + randInt, randInt, partitionKey); return doc; } private <T> List<String> sortTestObjectsAndCollectIds( List<TestObject> createdDocuments, Function<TestObject, T> extractProp, Comparator<T> comparer) { return createdDocuments.stream() .sorted((d1, d2) -> comparer.compare(extractProp.apply(d1), extractProp.apply(d2))) .map(d -> d.getId()).collect(Collectors.toList()); } private List<TestObject> insertDocuments(int documentCount, List<String> partitionKeys, CosmosAsyncContainer container) { List<TestObject> documentsToInsert = new ArrayList<>(); for (int i = 0; i < documentCount; i++) { documentsToInsert.add( getDocumentDefinition( UUID.randomUUID().toString(), partitionKeys == null ? 
UUID.randomUUID().toString() : partitionKeys.get(random.nextInt(partitionKeys.size())))); } List<TestObject> documentInserted = bulkInsertBlocking(container, documentsToInsert); waitIfNeededForReplicasToCatchUp(this.getClientBuilder()); return documentInserted; } private <T extends Comparable<T>> void queryWithOrderByAndAssert( int pageSize, int documentCount, String query, CosmosAsyncContainer container, Function<TestObject, T> extractProp, List<TestObject> documentsInserted) { List<TestObject> documentsPaged = queryWithContinuationTokens(query, pageSize, container, TestObject.class); List<TestObject> allDocuments = queryWithContinuationTokens(query, documentCount, container, TestObject.class); Comparator<T> validatorComparator = Comparator.nullsFirst(Comparator.<T>naturalOrder()); List<String> expectedResourceIds = sortTestObjectsAndCollectIds(documentsInserted, extractProp, validatorComparator); List<String> docIds1 = documentsPaged.stream().map(TestObject::getId).collect(Collectors.toList()); List<String> docIds2 = allDocuments.stream().map(TestObject::getId).collect(Collectors.toList()); assertThat(docIds2).containsExactlyInAnyOrderElementsOf(expectedResourceIds); assertThat(docIds1).containsExactlyElementsOf(docIds2); } private static CosmosUserDefinedFunctionProperties getCosmosUserDefinedFunctionProperties() { CosmosUserDefinedFunctionProperties udf = new CosmosUserDefinedFunctionProperties(UUID.randomUUID().toString(), "function() {var x = 10;}"); return udf; } private static CosmosTriggerProperties getCosmosTriggerProperties() { CosmosTriggerProperties trigger = new CosmosTriggerProperties(UUID.randomUUID().toString(), "function() {var " + "x = 10;}"); trigger.setTriggerOperation(TriggerOperation.CREATE); trigger.setTriggerType(TriggerType.PRE); return trigger; } private static CosmosStoredProcedureProperties getCosmosStoredProcedureProperties() { CosmosStoredProcedureProperties storedProcedureDef = new 
CosmosStoredProcedureProperties(UUID.randomUUID().toString(), "function() {var x = 10;}"); return storedProcedureDef; } static class TestObject { String id; String name; int prop; String mypk; String constantProp = "constantProp"; public TestObject() { } public TestObject(String id, String name, int prop, String mypk) { this.id = id; this.name = name; this.prop = prop; this.mypk = mypk; } public String getId() { return id; } public void setId(String id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getProp() { return prop; } public void setProp(final int prop) { this.prop = prop; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public String getConstantProp() { return constantProp; } } }
not sure we need to be consistent across languages on this condition to set headers, but just want to point it. LGTM otherwise.
/**
 * Assembles the default {@link HttpPipeline} used when the caller has not supplied a
 * pre-built pipeline via {@code pipeline(HttpPipeline)}.
 *
 * @param buildConfiguration the configuration store consulted by the user-agent policy.
 * @return a new {@link HttpPipeline} composed of authentication, header, retry and
 *     user-registered policies, in that required order.
 * @throws IllegalArgumentException if neither a token credential nor a key credential
 *     was configured on this builder.
 */
private HttpPipeline getDefaultHttpPipeline(Configuration buildConfiguration) {
    final List<HttpPipelinePolicy> httpPolicies = new ArrayList<>();

    // Authentication: an AAD token credential takes precedence over the key credential.
    if (tokenCredential != null) {
        httpPolicies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPE));
    } else if (!CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getSubscriptionKey())
        || !CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getApiKey())) {
        // Both headers are set even when only one of the keys is populated; confirmed to
        // match the behavior of the other language SDKs.
        headers.put(OCP_APIM_SUBSCRIPTION_KEY, metricsAdvisorKeyCredential.getSubscriptionKey());
        headers.put(API_KEY, metricsAdvisorKeyCredential.getApiKey());
    } else {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Missing credential information while building a client."));
    }

    // Required policies, in pipeline order.
    httpPolicies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
        buildConfiguration));
    httpPolicies.add(new RequestIdPolicy());
    httpPolicies.add(new AddHeadersPolicy(headers));
    HttpPolicyProviders.addBeforeRetryPolicies(httpPolicies);

    // Caller-supplied retry policy wins; otherwise fall back to the library default.
    final HttpPipelinePolicy effectiveRetryPolicy =
        (retryPolicy == null) ? DEFAULT_RETRY_POLICY : retryPolicy;
    httpPolicies.add(effectiveRetryPolicy);
    httpPolicies.add(new AddDatePolicy());

    // User-registered policies run after the required ones.
    httpPolicies.addAll(this.policies);
    HttpPolicyProviders.addAfterRetryPolicies(httpPolicies);

    final HttpPipelineBuilder pipelineBuilder = new HttpPipelineBuilder();
    pipelineBuilder.policies(httpPolicies.toArray(new HttpPipelinePolicy[0]));
    pipelineBuilder.httpClient(httpClient);
    return pipelineBuilder.build();
}
|| !CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getApiKey())) {
/**
 * Creates the default HTTP pipeline used when no explicit pipeline was configured on
 * this builder.
 *
 * @param buildConfiguration the configuration store passed to the user-agent policy.
 * @return a new HttpPipeline composed of authentication, header, retry and user policies.
 * @throws IllegalArgumentException if no credential (token or key) has been configured.
 */
private HttpPipeline getDefaultHttpPipeline(Configuration buildConfiguration) {
    final List<HttpPipelinePolicy> policies = new ArrayList<>();
    if (tokenCredential != null) {
        // AAD token credential takes precedence over the key credential.
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPE));
    } else if (!CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getSubscriptionKey())
        || !CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getApiKey())) {
        // Both headers are set even when only one of the keys is populated; confirmed
        // to be the expected behavior, consistent with the other language SDKs.
        headers.put(OCP_APIM_SUBSCRIPTION_KEY, metricsAdvisorKeyCredential.getSubscriptionKey());
        headers.put(API_KEY, metricsAdvisorKeyCredential.getApiKey());
    } else {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Missing credential information while building a client."));
    }
    policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
        buildConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersPolicy(headers));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    // Use the caller-supplied retry policy when present, otherwise the library default.
    policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
    policies.add(new AddDatePolicy());
    // User-registered policies run after the required policies.
    policies.addAll(this.policies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
class MetricsAdvisorClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER_VALUE = ContentType.APPLICATION_JSON; private static final String ACCEPT_HEADER = "Accept"; private static final String METRICSADVISOR_PROPERTIES = "azure-ai-metricsadvisor.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private static final String DEFAULT_SCOPE = "https: private final ClientLogger logger = new ClientLogger(MetricsAdvisorClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private String endpoint; private MetricsAdvisorKeyCredential metricsAdvisorKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline httpPipeline; private Configuration configuration; private RetryPolicy retryPolicy; private MetricsAdvisorServiceVersion version; static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key"; static final String API_KEY = "x-api-key"; /** * The constructor with defaults. */ public MetricsAdvisorClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(METRICSADVISOR_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE); } /** * Creates a {@link MetricsAdvisorClient} based on options set in the builder. Every time * {@code buildClient()} is called a new instance of {@link MetricsAdvisorClient} is created. 
* * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * @throws IllegalArgumentException if {@link */ public MetricsAdvisorClient buildClient() { return new MetricsAdvisorClient(buildAsyncClient()); } /** * Creates a {@link MetricsAdvisorAsyncClient} based on options set in the builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link MetricsAdvisorAsyncClient} is created. * * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorAsyncClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * has not been set. * @throws IllegalArgumentException if {@link */ public MetricsAdvisorAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); final Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; final MetricsAdvisorServiceVersion serviceVersion = version != null ? version : MetricsAdvisorServiceVersion.getLatest(); HttpPipeline pipeline = httpPipeline; if (pipeline == null) { pipeline = getDefaultHttpPipeline(buildConfiguration); } final AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2Impl advisorRestAPIOpenAPIV2 = new AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2ImplBuilder() .endpoint(endpoint) .pipeline(pipeline) .buildClient(); return new MetricsAdvisorAsyncClient(advisorRestAPIOpenAPIV2, serviceVersion); } /** * Sets the service endpoint for the Azure Metrics Advisor instance. * * @param endpoint The URL of the Azure Metrics Advisor instance service requests to and receive responses from. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException if {@code endpoint} is null * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. 
*/ public MetricsAdvisorClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } if (endpoint.endsWith("/")) { this.endpoint = endpoint.substring(0, endpoint.length() - 1); } else { this.endpoint = endpoint; } return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link MetricsAdvisorClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. */ public MetricsAdvisorClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Sets the {@link MetricsAdvisorKeyCredential} to use when authenticating HTTP requests for this * MetricsAdvisorClientBuilder. * * @param metricsAdvisorKeyCredential {@link MetricsAdvisorKeyCredential} API key credential * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code metricsAdvisorKeyCredential} is null. */ public MetricsAdvisorClientBuilder credential(MetricsAdvisorKeyCredential metricsAdvisorKeyCredential) { this.metricsAdvisorKeyCredential = Objects.requireNonNull(metricsAdvisorKeyCredential, "'metricsAdvisorKeyCredential' cannot be null."); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p>If {@code logOptions} isn't provided, the default options will use {@link HttpLogDetailLevel * which will prevent logging.</p> * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated MetricsAdvisorClientBuilder object. 
*/ public MetricsAdvisorClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public MetricsAdvisorClientBuilder addPolicy(HttpPipelinePolicy policy) { policies.add(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link MetricsAdvisorClientBuilder * {@link MetricsAdvisorClient}. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to. 
* * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy * <p> * The default retry policy will be used if not provided {@link MetricsAdvisorClientBuilder * to build {@link MetricsAdvisorAsyncClient} or {@link MetricsAdvisorClient}. * * @param retryPolicy user's retry policy applied to each request. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link MetricsAdvisorServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link MetricsAdvisorServiceVersion} of the service to be used when making requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder serviceVersion(MetricsAdvisorServiceVersion version) { this.version = version; return this; } }
class MetricsAdvisorClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER_VALUE = ContentType.APPLICATION_JSON; private static final String ACCEPT_HEADER = "Accept"; private static final String METRICSADVISOR_PROPERTIES = "azure-ai-metricsadvisor.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private static final String DEFAULT_SCOPE = "https: private final ClientLogger logger = new ClientLogger(MetricsAdvisorClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private String endpoint; private MetricsAdvisorKeyCredential metricsAdvisorKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline httpPipeline; private Configuration configuration; private RetryPolicy retryPolicy; private MetricsAdvisorServiceVersion version; static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key"; static final String API_KEY = "x-api-key"; /** * The constructor with defaults. */ public MetricsAdvisorClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(METRICSADVISOR_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE); } /** * Creates a {@link MetricsAdvisorClient} based on options set in the builder. Every time * {@code buildClient()} is called a new instance of {@link MetricsAdvisorClient} is created. 
* * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * @throws IllegalArgumentException if {@link */ public MetricsAdvisorClient buildClient() { return new MetricsAdvisorClient(buildAsyncClient()); } /** * Creates a {@link MetricsAdvisorAsyncClient} based on options set in the builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link MetricsAdvisorAsyncClient} is created. * * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorAsyncClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * has not been set. * @throws IllegalArgumentException if {@link */ public MetricsAdvisorAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); final Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; final MetricsAdvisorServiceVersion serviceVersion = version != null ? version : MetricsAdvisorServiceVersion.getLatest(); HttpPipeline pipeline = httpPipeline; if (pipeline == null) { pipeline = getDefaultHttpPipeline(buildConfiguration); } final AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2Impl advisorRestAPIOpenAPIV2 = new AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2ImplBuilder() .endpoint(endpoint) .pipeline(pipeline) .buildClient(); return new MetricsAdvisorAsyncClient(advisorRestAPIOpenAPIV2, serviceVersion); } /** * Sets the service endpoint for the Azure Metrics Advisor instance. * * @param endpoint The URL of the Azure Metrics Advisor instance service requests to and receive responses from. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException if {@code endpoint} is null * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. 
*/ public MetricsAdvisorClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } if (endpoint.endsWith("/")) { this.endpoint = endpoint.substring(0, endpoint.length() - 1); } else { this.endpoint = endpoint; } return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link MetricsAdvisorClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. */ public MetricsAdvisorClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Sets the {@link MetricsAdvisorKeyCredential} to use when authenticating HTTP requests for this * MetricsAdvisorClientBuilder. * * @param metricsAdvisorKeyCredential {@link MetricsAdvisorKeyCredential} API key credential * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code metricsAdvisorKeyCredential} is null. */ public MetricsAdvisorClientBuilder credential(MetricsAdvisorKeyCredential metricsAdvisorKeyCredential) { this.metricsAdvisorKeyCredential = Objects.requireNonNull(metricsAdvisorKeyCredential, "'metricsAdvisorKeyCredential' cannot be null."); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p>If {@code logOptions} isn't provided, the default options will use {@link HttpLogDetailLevel * which will prevent logging.</p> * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated MetricsAdvisorClientBuilder object. 
*/ public MetricsAdvisorClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public MetricsAdvisorClientBuilder addPolicy(HttpPipelinePolicy policy) { policies.add(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link MetricsAdvisorClientBuilder * {@link MetricsAdvisorClient}. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to. 
* * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy * <p> * The default retry policy will be used if not provided {@link MetricsAdvisorClientBuilder * to build {@link MetricsAdvisorAsyncClient} or {@link MetricsAdvisorClient}. * * @param retryPolicy user's retry policy applied to each request. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link MetricsAdvisorServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link MetricsAdvisorServiceVersion} of the service to be used when making requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder serviceVersion(MetricsAdvisorServiceVersion version) { this.version = version; return this; } }
Confirmed with the other language SDKs — this is the expected behavior.
/**
 * Creates the default HTTP pipeline used when no explicit pipeline was configured on
 * this builder.
 *
 * @param buildConfiguration the configuration store passed to the user-agent policy.
 * @return a new HttpPipeline composed of authentication, header, retry and user policies.
 * @throws IllegalArgumentException if no credential (token or key) has been configured.
 */
private HttpPipeline getDefaultHttpPipeline(Configuration buildConfiguration) {
    final List<HttpPipelinePolicy> policies = new ArrayList<>();
    if (tokenCredential != null) {
        // AAD token credential takes precedence over the key credential.
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPE));
    } else if (!CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getSubscriptionKey())
        || !CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getApiKey())) {
        // Both headers are set even when only one of the keys is populated; confirmed
        // to be the expected behavior, consistent with the other language SDKs.
        headers.put(OCP_APIM_SUBSCRIPTION_KEY, metricsAdvisorKeyCredential.getSubscriptionKey());
        headers.put(API_KEY, metricsAdvisorKeyCredential.getApiKey());
    } else {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Missing credential information while building a client."));
    }
    policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
        buildConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersPolicy(headers));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    // Use the caller-supplied retry policy when present, otherwise the library default.
    policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
    policies.add(new AddDatePolicy());
    // User-registered policies run after the required policies.
    policies.addAll(this.policies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
|| !CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getApiKey())) {
/**
 * Creates the default HTTP pipeline used when no explicit pipeline was configured on
 * this builder.
 *
 * @param buildConfiguration the configuration store passed to the user-agent policy.
 * @return a new HttpPipeline composed of authentication, header, retry and user policies.
 * @throws IllegalArgumentException if no credential (token or key) has been configured.
 */
private HttpPipeline getDefaultHttpPipeline(Configuration buildConfiguration) {
    final List<HttpPipelinePolicy> policies = new ArrayList<>();
    if (tokenCredential != null) {
        // AAD token credential takes precedence over the key credential.
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPE));
    } else if (!CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getSubscriptionKey())
        || !CoreUtils.isNullOrEmpty(metricsAdvisorKeyCredential.getApiKey())) {
        // Both headers are set even when only one of the keys is populated; confirmed
        // to be the expected behavior, consistent with the other language SDKs.
        headers.put(OCP_APIM_SUBSCRIPTION_KEY, metricsAdvisorKeyCredential.getSubscriptionKey());
        headers.put(API_KEY, metricsAdvisorKeyCredential.getApiKey());
    } else {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Missing credential information while building a client."));
    }
    policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
        buildConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersPolicy(headers));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    // Use the caller-supplied retry policy when present, otherwise the library default.
    policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy);
    policies.add(new AddDatePolicy());
    // User-registered policies run after the required policies.
    policies.addAll(this.policies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
class MetricsAdvisorClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER_VALUE = ContentType.APPLICATION_JSON; private static final String ACCEPT_HEADER = "Accept"; private static final String METRICSADVISOR_PROPERTIES = "azure-ai-metricsadvisor.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private static final String DEFAULT_SCOPE = "https: private final ClientLogger logger = new ClientLogger(MetricsAdvisorClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private String endpoint; private MetricsAdvisorKeyCredential metricsAdvisorKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline httpPipeline; private Configuration configuration; private RetryPolicy retryPolicy; private MetricsAdvisorServiceVersion version; static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key"; static final String API_KEY = "x-api-key"; /** * The constructor with defaults. */ public MetricsAdvisorClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(METRICSADVISOR_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE); } /** * Creates a {@link MetricsAdvisorClient} based on options set in the builder. Every time * {@code buildClient()} is called a new instance of {@link MetricsAdvisorClient} is created. 
* * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * @throws IllegalArgumentException if {@link */ public MetricsAdvisorClient buildClient() { return new MetricsAdvisorClient(buildAsyncClient()); } /** * Creates a {@link MetricsAdvisorAsyncClient} based on options set in the builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link MetricsAdvisorAsyncClient} is created. * * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorAsyncClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * has not been set. * @throws IllegalArgumentException if {@link */ public MetricsAdvisorAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); final Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; final MetricsAdvisorServiceVersion serviceVersion = version != null ? version : MetricsAdvisorServiceVersion.getLatest(); HttpPipeline pipeline = httpPipeline; if (pipeline == null) { pipeline = getDefaultHttpPipeline(buildConfiguration); } final AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2Impl advisorRestAPIOpenAPIV2 = new AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2ImplBuilder() .endpoint(endpoint) .pipeline(pipeline) .buildClient(); return new MetricsAdvisorAsyncClient(advisorRestAPIOpenAPIV2, serviceVersion); } /** * Sets the service endpoint for the Azure Metrics Advisor instance. * * @param endpoint The URL of the Azure Metrics Advisor instance service requests to and receive responses from. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException if {@code endpoint} is null * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. 
*/ public MetricsAdvisorClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } if (endpoint.endsWith("/")) { this.endpoint = endpoint.substring(0, endpoint.length() - 1); } else { this.endpoint = endpoint; } return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link MetricsAdvisorClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. */ public MetricsAdvisorClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Sets the {@link MetricsAdvisorKeyCredential} to use when authenticating HTTP requests for this * MetricsAdvisorClientBuilder. * * @param metricsAdvisorKeyCredential {@link MetricsAdvisorKeyCredential} API key credential * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code metricsAdvisorKeyCredential} is null. */ public MetricsAdvisorClientBuilder credential(MetricsAdvisorKeyCredential metricsAdvisorKeyCredential) { this.metricsAdvisorKeyCredential = Objects.requireNonNull(metricsAdvisorKeyCredential, "'metricsAdvisorKeyCredential' cannot be null."); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p>If {@code logOptions} isn't provided, the default options will use {@link HttpLogDetailLevel * which will prevent logging.</p> * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated MetricsAdvisorClientBuilder object. 
*/ public MetricsAdvisorClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public MetricsAdvisorClientBuilder addPolicy(HttpPipelinePolicy policy) { policies.add(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link MetricsAdvisorClientBuilder * {@link MetricsAdvisorClient}. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to. 
* * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy * <p> * The default retry policy will be used if not provided {@link MetricsAdvisorClientBuilder * to build {@link MetricsAdvisorAsyncClient} or {@link MetricsAdvisorClient}. * * @param retryPolicy user's retry policy applied to each request. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link MetricsAdvisorServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link MetricsAdvisorServiceVersion} of the service to be used when making requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder serviceVersion(MetricsAdvisorServiceVersion version) { this.version = version; return this; } }
class MetricsAdvisorClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER_VALUE = ContentType.APPLICATION_JSON; private static final String ACCEPT_HEADER = "Accept"; private static final String METRICSADVISOR_PROPERTIES = "azure-ai-metricsadvisor.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private static final String DEFAULT_SCOPE = "https: private final ClientLogger logger = new ClientLogger(MetricsAdvisorClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private String endpoint; private MetricsAdvisorKeyCredential metricsAdvisorKeyCredential; private TokenCredential tokenCredential; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline httpPipeline; private Configuration configuration; private RetryPolicy retryPolicy; private MetricsAdvisorServiceVersion version; static final String OCP_APIM_SUBSCRIPTION_KEY = "Ocp-Apim-Subscription-Key"; static final String API_KEY = "x-api-key"; /** * The constructor with defaults. */ public MetricsAdvisorClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(METRICSADVISOR_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(ACCEPT_HEADER, CONTENT_TYPE_HEADER_VALUE); } /** * Creates a {@link MetricsAdvisorClient} based on options set in the builder. Every time * {@code buildClient()} is called a new instance of {@link MetricsAdvisorClient} is created. 
* * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * @throws IllegalArgumentException if {@link */ public MetricsAdvisorClient buildClient() { return new MetricsAdvisorClient(buildAsyncClient()); } /** * Creates a {@link MetricsAdvisorAsyncClient} based on options set in the builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link MetricsAdvisorAsyncClient} is created. * * <p> * If {@link * {@link * settings are ignored. * </p> * * @return A MetricsAdvisorAsyncClient with the options set from the builder. * @throws NullPointerException if {@link * {@link * has not been set. * @throws IllegalArgumentException if {@link */ public MetricsAdvisorAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); final Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; final MetricsAdvisorServiceVersion serviceVersion = version != null ? version : MetricsAdvisorServiceVersion.getLatest(); HttpPipeline pipeline = httpPipeline; if (pipeline == null) { pipeline = getDefaultHttpPipeline(buildConfiguration); } final AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2Impl advisorRestAPIOpenAPIV2 = new AzureCognitiveServiceMetricsAdvisorRestAPIOpenAPIV2ImplBuilder() .endpoint(endpoint) .pipeline(pipeline) .buildClient(); return new MetricsAdvisorAsyncClient(advisorRestAPIOpenAPIV2, serviceVersion); } /** * Sets the service endpoint for the Azure Metrics Advisor instance. * * @param endpoint The URL of the Azure Metrics Advisor instance service requests to and receive responses from. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException if {@code endpoint} is null * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. 
*/ public MetricsAdvisorClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } if (endpoint.endsWith("/")) { this.endpoint = endpoint.substring(0, endpoint.length() - 1); } else { this.endpoint = endpoint; } return this; } /** * Sets the {@link TokenCredential} used to authenticate HTTP requests. * * @param tokenCredential {@link TokenCredential} used to authenticate HTTP requests. * @return The updated {@link MetricsAdvisorClientBuilder} object. * @throws NullPointerException If {@code tokenCredential} is null. */ public MetricsAdvisorClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = Objects.requireNonNull(tokenCredential, "'tokenCredential' cannot be null."); return this; } /** * Sets the {@link MetricsAdvisorKeyCredential} to use when authenticating HTTP requests for this * MetricsAdvisorClientBuilder. * * @param metricsAdvisorKeyCredential {@link MetricsAdvisorKeyCredential} API key credential * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code metricsAdvisorKeyCredential} is null. */ public MetricsAdvisorClientBuilder credential(MetricsAdvisorKeyCredential metricsAdvisorKeyCredential) { this.metricsAdvisorKeyCredential = Objects.requireNonNull(metricsAdvisorKeyCredential, "'metricsAdvisorKeyCredential' cannot be null."); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p>If {@code logOptions} isn't provided, the default options will use {@link HttpLogDetailLevel * which will prevent logging.</p> * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated MetricsAdvisorClientBuilder object. 
*/ public MetricsAdvisorClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * * @return The updated MetricsAdvisorClientBuilder object. * @throws NullPointerException If {@code policy} is null. */ public MetricsAdvisorClientBuilder addPolicy(HttpPipelinePolicy policy) { policies.add(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link MetricsAdvisorClientBuilder * {@link MetricsAdvisorClient}. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to. 
* * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy * <p> * The default retry policy will be used if not provided {@link MetricsAdvisorClientBuilder * to build {@link MetricsAdvisorAsyncClient} or {@link MetricsAdvisorClient}. * * @param retryPolicy user's retry policy applied to each request. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link MetricsAdvisorServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link MetricsAdvisorServiceVersion} of the service to be used when making requests. * * @return The updated MetricsAdvisorClientBuilder object. */ public MetricsAdvisorClientBuilder serviceVersion(MetricsAdvisorServiceVersion version) { this.version = version; return this; } }
Generally, the playback client should only be used when `TestMode` is `PLAYBACK`; here it is selected whenever `httpClient` is null, which would also apply in LIVE runs. Prefer gating on `interceptorManager.isPlaybackMode()`.
/**
 * Builds a {@code QuantumClientBuilder} wired for the current test mode.
 * <p>
 * In RECORD mode the interceptor manager's record policy is attached so
 * traffic is captured; the playback HTTP client is used only when the
 * interceptor manager is actually in playback mode.
 * <p>
 * Fix: the original selected the playback client whenever the caller passed a
 * null {@code httpClient}, which would also route LIVE runs through playback.
 *
 * @param httpClient the real HTTP client to use outside playback mode; may be null.
 * @return the configured builder.
 */
QuantumClientBuilder getClientBuilder(HttpClient httpClient) {
    QuantumClientBuilder builder = new QuantumClientBuilder();
    if (getTestMode() == TestMode.RECORD) {
        builder.addPolicy(interceptorManager.getRecordPolicy());
    }
    return builder
        // Only substitute the playback client when actually replaying.
        .httpClient(interceptorManager.isPlaybackMode()
            ? interceptorManager.getPlaybackClient() : httpClient)
        .credential(new AzureCliCredentialBuilder().build())
        .subscriptionId(subscriptionId)
        .resourceGroupName(resourceGroup)
        .workspaceName(workspaceName)
        .host(getEndpoint());
}
.httpClient(httpClient == null ? interceptorManager.getPlaybackClient() : httpClient)
/**
 * Creates a {@code QuantumClientBuilder} configured for the current test mode:
 * attaches the record policy in RECORD mode, swaps in the playback HTTP client
 * when replaying, and applies the recorded workspace settings.
 *
 * @param httpClient the live HTTP client to use when not in playback mode.
 * @return the fully configured builder.
 */
QuantumClientBuilder getClientBuilder(HttpClient httpClient) {
    final QuantumClientBuilder clientBuilder = new QuantumClientBuilder();
    if (getTestMode() == TestMode.RECORD) {
        clientBuilder.addPolicy(interceptorManager.getRecordPolicy());
    }
    // Pick the transport first so the chained configuration below stays flat.
    final HttpClient effectiveClient = interceptorManager.isPlaybackMode()
        ? interceptorManager.getPlaybackClient()
        : httpClient;
    clientBuilder.httpClient(effectiveClient)
        .credential(new AzureCliCredentialBuilder().build())
        .subscriptionId(getSubscriptionId())
        .resourceGroupName(getResourceGroup())
        .workspaceName(getWorkspaceName())
        .host(getEndpoint());
    return clientBuilder;
}
// Base class for Quantum client tests. Reads the service endpoint,
// subscription id, resource group, and workspace name from the global
// configuration, and exposes the endpoint with a fixed playback substitute
// when the interceptor manager is replaying recorded sessions.
// NOTE(review): the playback-URL string literal in getEndpoint() appears
// truncated ("https: …) — likely mangled during extraction; confirm the full
// URL against the original source before relying on this text.
class QuantumClientTestBase extends TestBase { private final String endpoint = Configuration.getGlobalConfiguration().get("QUANTUM_ENDPOINT"); private final String subscriptionId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID); private final String resourceGroup = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_RESOURCE_GROUP); private final String workspaceName = Configuration.getGlobalConfiguration().get("QUANTUM_WORKSPACE"); String getEndpoint() { return interceptorManager.isPlaybackMode() ? "https: : endpoint; } }
// Base class for Quantum client tests. In addition to reading the workspace
// settings from the global configuration, the getters below route each value
// through testResourceNamer.recordValueFromConfig(...) so the values are
// captured into (and replayed from) session records instead of leaking live
// configuration into playback runs.
// NOTE(review): the playback-URL string literal in getEndpoint() appears
// truncated ("https: …) — likely mangled during extraction; confirm the full
// URL against the original source before relying on this text.
class QuantumClientTestBase extends TestBase { private final String endpoint = Configuration.getGlobalConfiguration().get("QUANTUM_ENDPOINT"); private final String subscriptionId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID); private final String resourceGroup = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_RESOURCE_GROUP); private final String workspaceName = Configuration.getGlobalConfiguration().get("QUANTUM_WORKSPACE"); String getEndpoint() { return interceptorManager.isPlaybackMode() ? "https: : endpoint; } String getSubscriptionId() { return testResourceNamer.recordValueFromConfig(subscriptionId); } String getResourceGroup() { return testResourceNamer.recordValueFromConfig(resourceGroup); } String getWorkspaceName() { return testResourceNamer.recordValueFromConfig(workspaceName); } }
The magic number 1000000000 (nanoseconds per second) can be extracted into a named static constant, e.g. `NANOSECONDS_PER_SECOND`.
/**
 * Computes the aggregate throughput across all parallel workers.
 * <p>
 * Each worker's rate is its completed-operation count divided by the elapsed
 * time of its last completion (converted from nanoseconds to seconds); the
 * per-worker rates are summed.
 *
 * @return the combined throughput in operations per second.
 */
private static double getOperationsPerSecond() {
    // Named constant replaces the magic literal 1000000000 (ns per second).
    final double NANOSECONDS_PER_SECOND = 1_000_000_000.0;
    return IntStream.range(0, completedOperations.length)
        .mapToDouble(i -> completedOperations[i]
            / (((double) lastCompletionNanoTimes[i]) / NANOSECONDS_PER_SECOND))
        .sum();
}
.mapToDouble(i -> completedOperations[i] / (((double) lastCompletionNanoTimes[i]) / 1000000000))
/**
 * Sums the per-worker throughput over every parallel worker.
 * <p>
 * A worker's throughput is its completed-operation count divided by the
 * elapsed seconds of its last completion, where elapsed seconds is the
 * recorded nanosecond timestamp scaled by {@code NANOSECONDS_PER_SECOND}.
 *
 * @return total operations per second across all workers.
 */
private static double getOperationsPerSecond() {
    return IntStream.range(0, completedOperations.length)
        .mapToDouble(worker -> {
            double elapsedSeconds =
                ((double) lastCompletionNanoTimes[worker]) / NANOSECONDS_PER_SECOND;
            return completedOperations[worker] / elapsedSeconds;
        })
        .sum();
}
class PerfStressProgram { private static int[] completedOperations; private static long[] lastCompletionNanoTimes; private static int getCompletedOperations() { return IntStream.of(completedOperations).sum(); } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param classes the performance test classes to execute. * @param args the command line arguments ro run performance tests with. */ public static void run(Class<?>[] classes, String[] args) { List<Class<?>> classList = new ArrayList<>(Arrays.asList(classes)); try { classList.add(Class.forName("com.azure.perf.test.core.NoOpTest")); classList.add(Class.forName("com.azure.perf.test.core.ExceptionTest")); classList.add(Class.forName("com.azure.perf.test.core.SleepTest")); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } String[] commands = classList.stream().map(c -> getCommandName(c.getSimpleName())) .toArray(i -> new String[i]); PerfStressOptions[] options = classList.stream().map(c -> { try { return c.getConstructors()[0].getParameterTypes()[0].getConstructors()[0].newInstance(); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException e) { throw new RuntimeException(e); } }).toArray(i -> new PerfStressOptions[i]); JCommander jc = new JCommander(); for (int i = 0; i < commands.length; i++) { jc.addCommand(commands[i], options[i]); } jc.parse(args); String parsedCommand = jc.getParsedCommand(); if (parsedCommand == null || parsedCommand.isEmpty()) { jc.usage(); } else { int index = Arrays.asList(commands).indexOf(parsedCommand); run(classList.get(index), options[index]); } } private static String getCommandName(String testName) { String lower = testName.toLowerCase(); return lower.endsWith("test") ? lower.substring(0, lower.length() - 4) : lower; } /** * Run the performance test passed to be executed. * * @throws RuntimeException if the execution fails. 
* @param testClass the performance test class to execute. * @param options the configuration ro run performance test with. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfStressTest<?>[] tests = new PerfStressTest<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfStressTest<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); try { Flux.just(tests).flatMap(PerfStressTest::setupAsync).blockLast(); setupStatus.dispose(); if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } Flux.just(tests).flatMap(t -> t.cleanupAsync()).blockLast(); } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != 
null) { cleanupStatus.dispose(); } } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param tests the performance tests to be executed. * @param sync indicate if synchronous test should be run. * @param parallel the number of parallel threads to run the performance test on. * @param durationSeconds the duration for which performance test should be run on. * @param title the title of the performance tests. */ public static void runTests(PerfStressTest<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { completedOperations = new int[parallel]; lastCompletionNanoTimes = new long[parallel]; long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); int[] lastCompleted = new int[] { 0 }; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { int totalCompleted = getCompletedOperations(); int currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.0f", currentCompleted, totalCompleted, averageCompleted); }, true, true); if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); try { forkJoinPool.submit(() -> { IntStream.range(0, parallel).parallel().forEach(i -> runLoop(tests[i], i, endNanoTime)); }).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } else { Flux.range(0, parallel) .parallel() .runOn(Schedulers.boundedElastic()) .flatMap(i -> runLoopAsync(tests[i], i, endNanoTime)) .then() .block(); } progressStatus.dispose(); System.out.println("=== Results ==="); int totalOperations = getCompletedOperations(); double operationsPerSecond = getOperationsPerSecond(); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed 
%,d operations in a weighted-average of %,.2fs (%,.2f ops/s, %,.3f s/op)%n", totalOperations, weightedAverageSeconds, operationsPerSecond, secondsPerOperation); System.out.println(); } private static void runLoop(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); while (System.nanoTime() < endNanoTime) { test.run(); completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; } } private static Mono<Void> runLoopAsync(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); return Flux.just(1) .repeat() .flatMap(i -> test.runAsync().then(Mono.just(1)), 1) .doOnNext(v -> { completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; }) .take(Duration.ofNanos(endNanoTime - startNanoTime)) .then(); } private static Disposable printStatus(String header, Supplier<Object> status, boolean newLine, boolean printFinalStatus) { System.out.println(header); boolean[] needsExtraNewline = new boolean[] { false }; return Flux.interval(Duration.ofSeconds(1)).doFinally(s -> { if (printFinalStatus) { printStatusHelper(status, newLine, needsExtraNewline); } if (needsExtraNewline[0]) { System.out.println(); } System.out.println(); }).subscribe(i -> { printStatusHelper(status, newLine, needsExtraNewline); }); } private static void printStatusHelper(Supplier<Object> status, boolean newLine, boolean[] needsExtraNewline) { Object obj = status.get(); if (newLine) { System.out.println(obj); } else { System.out.print(obj); needsExtraNewline[0] = true; } } }
class PerfStressProgram { private static final int NANOSECONDS_PER_SECOND = 1_000_000_000; private static int[] completedOperations; private static long[] lastCompletionNanoTimes; private static int getCompletedOperations() { return IntStream.of(completedOperations).sum(); } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param classes the performance test classes to execute. * @param args the command line arguments ro run performance tests with. */ public static void run(Class<?>[] classes, String[] args) { List<Class<?>> classList = new ArrayList<>(Arrays.asList(classes)); try { classList.add(Class.forName("com.azure.perf.test.core.NoOpTest")); classList.add(Class.forName("com.azure.perf.test.core.ExceptionTest")); classList.add(Class.forName("com.azure.perf.test.core.SleepTest")); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } String[] commands = classList.stream().map(c -> getCommandName(c.getSimpleName())) .toArray(i -> new String[i]); PerfStressOptions[] options = classList.stream().map(c -> { try { return c.getConstructors()[0].getParameterTypes()[0].getConstructors()[0].newInstance(); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException e) { throw new RuntimeException(e); } }).toArray(i -> new PerfStressOptions[i]); JCommander jc = new JCommander(); for (int i = 0; i < commands.length; i++) { jc.addCommand(commands[i], options[i]); } jc.parse(args); String parsedCommand = jc.getParsedCommand(); if (parsedCommand == null || parsedCommand.isEmpty()) { jc.usage(); } else { int index = Arrays.asList(commands).indexOf(parsedCommand); run(classList.get(index), options[index]); } } private static String getCommandName(String testName) { String lower = testName.toLowerCase(); return lower.endsWith("test") ? 
lower.substring(0, lower.length() - 4) : lower; } /** * Run the performance test passed to be executed. * * @throws RuntimeException if the execution fails. * @param testClass the performance test class to execute. * @param options the configuration ro run performance test with. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfStressTest<?>[] tests = new PerfStressTest<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfStressTest<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); try { Flux.just(tests).flatMap(PerfStressTest::setupAsync).blockLast(); setupStatus.dispose(); if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } Flux.just(tests).flatMap(t -> t.cleanupAsync()).blockLast(); } } } finally { if (!options.isNoCleanup()) { if 
(cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.dispose(); } } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param tests the performance tests to be executed. * @param sync indicate if synchronous test should be run. * @param parallel the number of parallel threads to run the performance test on. * @param durationSeconds the duration for which performance test should be run on. * @param title the title of the performance tests. */ public static void runTests(PerfStressTest<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { completedOperations = new int[parallel]; lastCompletionNanoTimes = new long[parallel]; long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); int[] lastCompleted = new int[] { 0 }; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { int totalCompleted = getCompletedOperations(); int currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); try { forkJoinPool.submit(() -> { IntStream.range(0, parallel).parallel().forEach(i -> runLoop(tests[i], i, endNanoTime)); }).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } else { Flux.range(0, parallel) .parallel() .runOn(Schedulers.boundedElastic()) .flatMap(i -> runLoopAsync(tests[i], i, endNanoTime)) .then() .block(); } progressStatus.dispose(); System.out.println("=== Results ==="); int totalOperations = getCompletedOperations(); double operationsPerSecond = 
getOperationsPerSecond(); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %,.2fs (%,.2f ops/s, %,.3f s/op)%n", totalOperations, weightedAverageSeconds, operationsPerSecond, secondsPerOperation); System.out.println(); } private static void runLoop(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); while (System.nanoTime() < endNanoTime) { test.run(); completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; } } private static Mono<Void> runLoopAsync(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); return Flux.just(1) .repeat() .flatMap(i -> test.runAsync().then(Mono.just(1)), 1) .doOnNext(v -> { completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; }) .take(Duration.ofNanos(endNanoTime - startNanoTime)) .then(); } private static Disposable printStatus(String header, Supplier<Object> status, boolean newLine, boolean printFinalStatus) { System.out.println(header); boolean[] needsExtraNewline = new boolean[] { false }; return Flux.interval(Duration.ofSeconds(1)).doFinally(s -> { if (printFinalStatus) { printStatusHelper(status, newLine, needsExtraNewline); } if (needsExtraNewline[0]) { System.out.println(); } System.out.println(); }).subscribe(i -> { printStatusHelper(status, newLine, needsExtraNewline); }); } private static void printStatusHelper(Supplier<Object> status, boolean newLine, boolean[] needsExtraNewline) { Object obj = status.get(); if (newLine) { System.out.println(obj); } else { System.out.print(obj); needsExtraNewline[0] = true; } } }
This is already checked by the caller, which validates that the header name `startsWith` the prefix.
/**
 * Records a header in the collection, keyed by its name with the shared
 * collection prefix stripped. The caller has already validated (via
 * {@code headerStartsWithPrefix}) that the name begins with the prefix, so
 * the unchecked substring is safe.
 */
void addHeader(String headerName, String headerValue) {
    String collectionKey = headerName.substring(prefixLength);
    values.put(collectionKey, headerValue);
}
values.put(headerName.substring(prefixLength), headerValue);
// Stores the header value under its name minus the shared collection prefix.
// No startsWith guard is needed here: the caller validates the prefix via
// headerStartsWithPrefix(headerName) before invoking this method, so
// substring(prefixLength) cannot go out of range.
void addHeader(String headerName, String headerValue) { values.put(headerName.substring(prefixLength), headerValue); }
class HeaderCollectionHandler { private final String prefix; private final int prefixLength; private final Map<String, String> values; private final Field declaringField; HeaderCollectionHandler(String prefix, Field declaringField) { this.prefix = prefix; this.prefixLength = prefix.length(); this.values = new HashMap<>(); this.declaringField = declaringField; } boolean headerStartsWithPrefix(String headerName) { return headerName.startsWith(prefix); } void injectValuesIntoDeclaringField(Object deserializedHeaders, ClientLogger logger) { /* * First check if the deserialized headers type has a public setter. */ if (usePublicSetter(deserializedHeaders, logger)) { return; } logger.verbose("Failed to find or use public setter to set header collection."); /* * Otherwise fallback to setting the field directly. */ final boolean declaredFieldAccessibleBackup = declaringField.isAccessible(); try { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(true); return null; }); } declaringField.set(deserializedHeaders, values); logger.verbose("Set header collection by accessing the field directly."); } catch (IllegalAccessException ex) { logger.warning("Failed to inject header collection values into deserialized headers.", ex); } finally { if (!declaredFieldAccessibleBackup) { AccessController.doPrivileged((PrivilegedAction<Object>) () -> { declaringField.setAccessible(false); return null; }); } } } private boolean usePublicSetter(Object deserializedHeaders, ClientLogger logger) { try { String potentialSetterName = getPotentialSetterName(); Method setterMethod = deserializedHeaders.getClass().getDeclaredMethod(potentialSetterName, Map.class); if (Modifier.isPublic(setterMethod.getModifiers())) { setterMethod.invoke(deserializedHeaders, values); logger.verbose("User setter %s on class %s to set header collection.", potentialSetterName, deserializedHeaders.getClass().getSimpleName()); return true; } 
return false; } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException ignored) { return false; } } private String getPotentialSetterName() { String fieldName = declaringField.getName(); return "set" + fieldName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldName.substring(1); } }
/**
 * Collects header values sharing a common prefix and injects the resulting map into the field
 * that declared the header collection on a deserialized headers object.
 * <p>
 * A public {@code set<FieldName>(Map)} setter is preferred; reflective field access is the
 * fallback, with the field's accessibility restored afterwards.
 */
class HeaderCollectionHandler {
    private final String prefix;
    private final int prefixLength;
    private final Map<String, String> values;
    private final Field declaringField;

    HeaderCollectionHandler(String prefix, Field declaringField) {
        this.prefix = prefix;
        this.prefixLength = prefix.length();
        this.values = new HashMap<>();
        this.declaringField = declaringField;
    }

    /**
     * Determines whether the header name is a member of this collection.
     *
     * @param headerName the header name to inspect.
     * @return true when the name carries this collection's prefix.
     */
    boolean headerStartsWithPrefix(String headerName) {
        return headerName.startsWith(prefix);
    }

    /**
     * Writes the accumulated values into {@code deserializedHeaders}, trying the public setter
     * first and falling back to direct reflective field access. Errors are logged, not thrown.
     *
     * @param deserializedHeaders target headers object.
     * @param logger diagnostics sink.
     */
    @SuppressWarnings("deprecation")
    void injectValuesIntoDeclaringField(Object deserializedHeaders, ClientLogger logger) {
        // Fast path: a usable public setter exists.
        if (usePublicSetter(deserializedHeaders, logger)) {
            return;
        }

        logger.verbose("Failed to find or use public setter to set header collection.");

        // Slow path: write the field reflectively, restoring accessibility when done.
        final boolean wasAccessible = declaringField.isAccessible();
        try {
            if (!wasAccessible) {
                changeAccessibility(true);
            }
            declaringField.set(deserializedHeaders, values);
            logger.verbose("Set header collection by accessing the field directly.");
        } catch (IllegalAccessException ex) {
            logger.warning("Failed to inject header collection values into deserialized headers.", ex);
        } finally {
            if (!wasAccessible) {
                changeAccessibility(false);
            }
        }
    }

    // Toggles the declaring field's accessibility inside a privileged action.
    private void changeAccessibility(boolean accessible) {
        AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
            declaringField.setAccessible(accessible);
            return null;
        });
    }

    // Invokes the conventional public setter if present and public; any reflection failure
    // yields false so the caller can fall back to field access.
    private boolean usePublicSetter(Object deserializedHeaders, ClientLogger logger) {
        String setterName = getPotentialSetterName();
        try {
            Method setter = deserializedHeaders.getClass().getDeclaredMethod(setterName, Map.class);
            if (!Modifier.isPublic(setter.getModifiers())) {
                return false;
            }

            setter.invoke(deserializedHeaders, values);
            logger.verbose("User setter %s on class %s to set header collection.", setterName,
                deserializedHeaders.getClass().getSimpleName());
            return true;
        } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException ignored) {
            return false;
        }
    }

    // Maps a field name to its conventional setter, e.g. "metadata" -> "setMetadata".
    private String getPotentialSetterName() {
        String fieldName = declaringField.getName();
        return "set" + fieldName.substring(0, 1).toUpperCase(Locale.ROOT) + fieldName.substring(1);
    }
}
Fixed
private static double getOperationsPerSecond() { return IntStream.range(0, completedOperations.length) .mapToDouble(i -> completedOperations[i] / (((double) lastCompletionNanoTimes[i]) / 1000000000)) .sum(); }
.mapToDouble(i -> completedOperations[i] / (((double) lastCompletionNanoTimes[i]) / 1000000000))
private static double getOperationsPerSecond() { return IntStream.range(0, completedOperations.length) .mapToDouble(i -> completedOperations[i] / (((double) lastCompletionNanoTimes[i]) / NANOSECONDS_PER_SECOND)) .sum(); }
class PerfStressProgram { private static int[] completedOperations; private static long[] lastCompletionNanoTimes; private static int getCompletedOperations() { return IntStream.of(completedOperations).sum(); } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param classes the performance test classes to execute. * @param args the command line arguments ro run performance tests with. */ public static void run(Class<?>[] classes, String[] args) { List<Class<?>> classList = new ArrayList<>(Arrays.asList(classes)); try { classList.add(Class.forName("com.azure.perf.test.core.NoOpTest")); classList.add(Class.forName("com.azure.perf.test.core.ExceptionTest")); classList.add(Class.forName("com.azure.perf.test.core.SleepTest")); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } String[] commands = classList.stream().map(c -> getCommandName(c.getSimpleName())) .toArray(i -> new String[i]); PerfStressOptions[] options = classList.stream().map(c -> { try { return c.getConstructors()[0].getParameterTypes()[0].getConstructors()[0].newInstance(); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException e) { throw new RuntimeException(e); } }).toArray(i -> new PerfStressOptions[i]); JCommander jc = new JCommander(); for (int i = 0; i < commands.length; i++) { jc.addCommand(commands[i], options[i]); } jc.parse(args); String parsedCommand = jc.getParsedCommand(); if (parsedCommand == null || parsedCommand.isEmpty()) { jc.usage(); } else { int index = Arrays.asList(commands).indexOf(parsedCommand); run(classList.get(index), options[index]); } } private static String getCommandName(String testName) { String lower = testName.toLowerCase(); return lower.endsWith("test") ? lower.substring(0, lower.length() - 4) : lower; } /** * Run the performance test passed to be executed. * * @throws RuntimeException if the execution fails. 
* @param testClass the performance test class to execute. * @param options the configuration ro run performance test with. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfStressTest<?>[] tests = new PerfStressTest<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfStressTest<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); try { Flux.just(tests).flatMap(PerfStressTest::setupAsync).blockLast(); setupStatus.dispose(); if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } Flux.just(tests).flatMap(t -> t.cleanupAsync()).blockLast(); } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != 
null) { cleanupStatus.dispose(); } } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param tests the performance tests to be executed. * @param sync indicate if synchronous test should be run. * @param parallel the number of parallel threads to run the performance test on. * @param durationSeconds the duration for which performance test should be run on. * @param title the title of the performance tests. */ public static void runTests(PerfStressTest<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { completedOperations = new int[parallel]; lastCompletionNanoTimes = new long[parallel]; long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); int[] lastCompleted = new int[] { 0 }; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { int totalCompleted = getCompletedOperations(); int currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.0f", currentCompleted, totalCompleted, averageCompleted); }, true, true); if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); try { forkJoinPool.submit(() -> { IntStream.range(0, parallel).parallel().forEach(i -> runLoop(tests[i], i, endNanoTime)); }).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } else { Flux.range(0, parallel) .parallel() .runOn(Schedulers.boundedElastic()) .flatMap(i -> runLoopAsync(tests[i], i, endNanoTime)) .then() .block(); } progressStatus.dispose(); System.out.println("=== Results ==="); int totalOperations = getCompletedOperations(); double operationsPerSecond = getOperationsPerSecond(); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed 
%,d operations in a weighted-average of %,.2fs (%,.2f ops/s, %,.3f s/op)%n", totalOperations, weightedAverageSeconds, operationsPerSecond, secondsPerOperation); System.out.println(); } private static void runLoop(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); while (System.nanoTime() < endNanoTime) { test.run(); completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; } } private static Mono<Void> runLoopAsync(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); return Flux.just(1) .repeat() .flatMap(i -> test.runAsync().then(Mono.just(1)), 1) .doOnNext(v -> { completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; }) .take(Duration.ofNanos(endNanoTime - startNanoTime)) .then(); } private static Disposable printStatus(String header, Supplier<Object> status, boolean newLine, boolean printFinalStatus) { System.out.println(header); boolean[] needsExtraNewline = new boolean[] { false }; return Flux.interval(Duration.ofSeconds(1)).doFinally(s -> { if (printFinalStatus) { printStatusHelper(status, newLine, needsExtraNewline); } if (needsExtraNewline[0]) { System.out.println(); } System.out.println(); }).subscribe(i -> { printStatusHelper(status, newLine, needsExtraNewline); }); } private static void printStatusHelper(Supplier<Object> status, boolean newLine, boolean[] needsExtraNewline) { Object obj = status.get(); if (newLine) { System.out.println(obj); } else { System.out.print(obj); needsExtraNewline[0] = true; } } }
class PerfStressProgram { private static final int NANOSECONDS_PER_SECOND = 1_000_000_000; private static int[] completedOperations; private static long[] lastCompletionNanoTimes; private static int getCompletedOperations() { return IntStream.of(completedOperations).sum(); } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param classes the performance test classes to execute. * @param args the command line arguments ro run performance tests with. */ public static void run(Class<?>[] classes, String[] args) { List<Class<?>> classList = new ArrayList<>(Arrays.asList(classes)); try { classList.add(Class.forName("com.azure.perf.test.core.NoOpTest")); classList.add(Class.forName("com.azure.perf.test.core.ExceptionTest")); classList.add(Class.forName("com.azure.perf.test.core.SleepTest")); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } String[] commands = classList.stream().map(c -> getCommandName(c.getSimpleName())) .toArray(i -> new String[i]); PerfStressOptions[] options = classList.stream().map(c -> { try { return c.getConstructors()[0].getParameterTypes()[0].getConstructors()[0].newInstance(); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException e) { throw new RuntimeException(e); } }).toArray(i -> new PerfStressOptions[i]); JCommander jc = new JCommander(); for (int i = 0; i < commands.length; i++) { jc.addCommand(commands[i], options[i]); } jc.parse(args); String parsedCommand = jc.getParsedCommand(); if (parsedCommand == null || parsedCommand.isEmpty()) { jc.usage(); } else { int index = Arrays.asList(commands).indexOf(parsedCommand); run(classList.get(index), options[index]); } } private static String getCommandName(String testName) { String lower = testName.toLowerCase(); return lower.endsWith("test") ? 
lower.substring(0, lower.length() - 4) : lower; } /** * Run the performance test passed to be executed. * * @throws RuntimeException if the execution fails. * @param testClass the performance test class to execute. * @param options the configuration ro run performance test with. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Disposable setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Disposable cleanupStatus = null; PerfStressTest<?>[] tests = new PerfStressTest<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfStressTest<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | SecurityException | NoSuchMethodException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); try { Flux.just(tests).flatMap(PerfStressTest::setupAsync).blockLast(); setupStatus.dispose(); if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } Flux.just(tests).flatMap(t -> t.cleanupAsync()).blockLast(); } } } finally { if (!options.isNoCleanup()) { if 
(cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.dispose(); } } /** * Runs the performance tests passed to be executed. * * @throws RuntimeException if the execution fails. * @param tests the performance tests to be executed. * @param sync indicate if synchronous test should be run. * @param parallel the number of parallel threads to run the performance test on. * @param durationSeconds the duration for which performance test should be run on. * @param title the title of the performance tests. */ public static void runTests(PerfStressTest<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { completedOperations = new int[parallel]; lastCompletionNanoTimes = new long[parallel]; long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); int[] lastCompleted = new int[] { 0 }; Disposable progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { int totalCompleted = getCompletedOperations(); int currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); try { forkJoinPool.submit(() -> { IntStream.range(0, parallel).parallel().forEach(i -> runLoop(tests[i], i, endNanoTime)); }).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } } else { Flux.range(0, parallel) .parallel() .runOn(Schedulers.boundedElastic()) .flatMap(i -> runLoopAsync(tests[i], i, endNanoTime)) .then() .block(); } progressStatus.dispose(); System.out.println("=== Results ==="); int totalOperations = getCompletedOperations(); double operationsPerSecond = 
getOperationsPerSecond(); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %,.2fs (%,.2f ops/s, %,.3f s/op)%n", totalOperations, weightedAverageSeconds, operationsPerSecond, secondsPerOperation); System.out.println(); } private static void runLoop(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); while (System.nanoTime() < endNanoTime) { test.run(); completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; } } private static Mono<Void> runLoopAsync(PerfStressTest<?> test, int index, long endNanoTime) { long startNanoTime = System.nanoTime(); return Flux.just(1) .repeat() .flatMap(i -> test.runAsync().then(Mono.just(1)), 1) .doOnNext(v -> { completedOperations[index]++; lastCompletionNanoTimes[index] = System.nanoTime() - startNanoTime; }) .take(Duration.ofNanos(endNanoTime - startNanoTime)) .then(); } private static Disposable printStatus(String header, Supplier<Object> status, boolean newLine, boolean printFinalStatus) { System.out.println(header); boolean[] needsExtraNewline = new boolean[] { false }; return Flux.interval(Duration.ofSeconds(1)).doFinally(s -> { if (printFinalStatus) { printStatusHelper(status, newLine, needsExtraNewline); } if (needsExtraNewline[0]) { System.out.println(); } System.out.println(); }).subscribe(i -> { printStatusHelper(status, newLine, needsExtraNewline); }); } private static void printStatusHelper(Supplier<Object> status, boolean newLine, boolean[] needsExtraNewline) { Object obj = status.get(); if (newLine) { System.out.println(obj); } else { System.out.print(obj); needsExtraNewline[0] = true; } } }
The session-token application logic currently differs across the Java v2, Java v4, and .NET SDKs. 1. sessionTokenApplicable: in Java v4 we also skip applying the session token when Eventual consistency is explicitly requested at the request level for data-plane operations. This check is not present in v2 or the .NET SDK. 2. When the user passes a session token: I think in v4 we will keep it only for Session consistency, whereas in v2 and the .NET SDK we may keep it for non-Session consistency levels. After this change, in Java v4: 1. Triggers and UDFs will be treated as master resources and, like other master resources, we will not capture their session tokens. 2. We will not apply a session token for query plan, trigger, UDF, or execute-stored-procedure requests.
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
/**
 * Gateway-mode store model: translates {@code RxDocumentServiceRequest}s into HTTP calls against
 * the Cosmos gateway endpoint, converts responses into {@code RxDocumentServiceResponse}s, and
 * maintains session-token bookkeeping around each call.
 */
class RxGatewayStoreModel implements RxStoreModel {
    private static final byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
        DiagnosticsClientContext clientContext,
        ISessionContainer sessionContainer,
        ConsistencyLevel defaultConsistencyLevel,
        QueryCompatibilityMode queryCompatibilityMode,
        UserAgentContainer userAgentContainer,
        GlobalEndpointManager globalEndpointManager,
        HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);

        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }

        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());

        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }

        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PATCH);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    // Queries POST with a content type chosen by the configured query compatibility mode;
    // the IS_QUERY header is set for everything except query-plan requests.
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if (request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }

        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a flux which upon subscription issues HTTP call and emits one
     * RxDocumentServiceResponse.
     *
     * @param request the request to send.
     * @param method the HTTP method to use.
     * @return Mono&lt;RxDocumentServiceResponse&gt;
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            request.requestContext.resourcePhysicalAddress = uri.toString();

            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());

            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();

            HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray);

            // Query-plan and address-refresh calls get their own (shorter) timeout budgets.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);

            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            return Mono.error(e);
        }
    }

    // Merges the default headers with the request headers; request headers win on conflict.
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Add default headers not overridden by the request. Guarding with "headers == null"
        // here fixes a latent NPE: the original dereferenced headers before its null check below.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }

        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    // Resolves the service endpoint (honoring any endpoint override) and builds the request URI;
    // DatabaseAccount requests target the service root.
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media writes always go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }

        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }

        return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse
     * Observable.
     * <p>
     * Once the customer code subscribes to the observable returned by the CRUD APIs, the
     * subscription goes up till it reaches the source reactor netty's observable, and at that
     * point the HTTP invocation will be made.
     *
     * @param httpResponseMono the raw HTTP response publisher.
     * @param request the originating request (diagnostics are recorded onto it).
     * @return {@link Mono}
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }

                    // Throws a CosmosException for gateway error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders,
                        content);

                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content);
                    // Fix: only record the timeline when a request record exists — the original
                    // called takeTimelineSnapshot() here unconditionally and could NPE.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp,
                            null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
            .onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                if (!(unwrappedException instanceof Exception)) {
                    // Fatal or unknown throwable; surface it untouched.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }

                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Wrap non-Cosmos failures (e.g. transport errors) as CosmosException.
                    logger.error("Network failure", exception);
                    dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0,
                        exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }

                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce,
                            HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce,
                            HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }

                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                    BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
                }

                return Mono.error(dce);
            });
    }

    // Converts a gateway error status code into a CosmosException carrying the parsed error body.
    private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();

        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";

            // NOTE(review): new String(byte[]) uses the platform default charset; if the gateway
            // always responds in UTF-8 this should specify StandardCharsets.UTF_8 — confirm.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Dispatches the request to the HTTP verb helper matching its operation type.
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Patch:
                return this.patch(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in the standard web-exception retry policy.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);

                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }

                // Capture session tokens from selected failures on non-master resources so the
                // session container stays current (412/409, and 404 that is not a session miss).
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType()))
                    && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED
                        || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT
                        || (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND
                            && !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }

                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    // Records the response's session token; deleting a collection instead clears all tokens
    // associated with that collection's resource id.
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }

    // Master operations never use session tokens: master resources, stored-procedure operations
    // other than execution, and query-plan requests.
    private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
        return ReplicatedResourceClientUtils.isMasterResource(resourceType)
            || isStoredProcedureMasterOperation(resourceType, operationType)
            || operationType == OperationType.QueryPlan;
    }

    private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
        return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
    }
}
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos DB Gateway endpoint and maps the responses back into
 * {@link RxDocumentServiceResponse}s, capturing session tokens along the way.
 */
class RxGatewayStoreModel implements RxStoreModel {
    // Shared sentinel for responses with no body; avoids a per-response allocation.
    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers sent on every request unless the request itself supplies the same header.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            // Only advertise a default consistency level when one was configured.
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // Thin dispatchers mapping each document operation onto the matching HTTP verb.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PATCH);
    }

    // Upserts are POSTs distinguished server-side via request headers.
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Issues a query (or query-plan) request, setting the query-specific headers
     * before delegating to {@link #performRequest}.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if(request.getOperationType() != OperationType.QueryPlan) {
            // Query-plan requests are not flagged as queries; everything else is.
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which, upon subscription, issues the HTTP call
     * and emits one RxDocumentServiceResponse.
     *
     * @param request the document service request to send
     * @param method  the HTTP verb to use
     * @return a Mono emitting the single service response, or an error
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            request.requestContext.resourcePhysicalAddress = uri.toString();
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method,
                uri,
                uri.getPort(),
                httpHeaders,
                contentAsByteArray);
            // Query-plan and address-refresh calls get their own (typically shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous setup failures (e.g. bad URI) through the reactive chain.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the model's default headers; per-request
     * values win and null values are normalized to empty strings.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Defaults only apply when the request did not set the same header.
        // NOTE(review): headers is dereferenced here before the null check below,
        // so a null argument would NPE in this loop — confirm callers never pass null.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the absolute HTTPS URI for the request: endpoint override if present,
     * otherwise the endpoint chosen by the global endpoint manager, plus the resource path.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media/attachment operations always go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            // The database-account endpoint is the service root itself.
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    // Guarantees a leading '/' on a non-null path; passes null through unchanged.
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an RxDocumentServiceResponse Mono.
     *
     * The HTTP invocation is only made once the downstream subscriber subscribes;
     * HTTP error statuses are converted to CosmosExceptions by validateOrThrow, and
     * transport failures are wrapped and tagged with gateway sub-status codes.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request          the originating request (used for diagnostics and error context)
     * @return {@link Mono} emitting the converted response or a CosmosException
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            // Body may legitimately be empty; substitute the shared empty-array sentinel.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws a CosmosException for error-range status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // NOTE(review): this dereferences reactorNettyRequestRecord outside the
                    // null check above — would NPE if the record is ever null; confirm the
                    // record is always present in gateway mode.
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                            request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
            .onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                if (!(unwrappedException instanceof Exception)) {
                    // Errors (OOM etc.) are not wrapped; propagate as-is.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }
                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Transport-level failure: wrap it so downstream retry policies can classify it.
                    logger.error("Network failure", exception);
                    dce = BridgeInternal.createCosmosException(
                        request.requestContext.resourcePhysicalAddress, 0, exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }
                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    // Distinguish read timeouts from general endpoint unavailability.
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce,
                            HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce,
                            HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                        request, null, dce);
                    BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
                }
                return Mono.error(dce);
            });
    }

    /**
     * Throws a CosmosException when the HTTP status is in the gateway error range;
     * the exception carries the parsed CosmosError body, status text and request headers.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            // Re-wrap so the message always carries the status-code text.
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Routes the request to the per-operation handler based on its operation type.
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Patch:
                return this.patch(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in a retry policy for transient web exceptions.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point: applies the session token to the outgoing request, invokes it with
     * retries, and captures the session token from the response — including from
     * selected error responses (412/409, and 404 other than read-session-not-available)
     * on non-master resources, whose headers still carry usable session state.
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Updates the session container from the response headers; deleting a collection
     * instead clears all tokens for that collection's resource id.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                // Name-based requests carry the rid in the OWNER_ID response header.
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }

    // Master operations: master resources, non-execute sproc operations, and query plans.
    private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
        return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
            isStoredProcedureMasterOperation(resourceType, operationType) ||
            operationType == OperationType.QueryPlan;
    }

    // Any sproc operation other than executing it (CRUD on the sproc) is a master operation.
    private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
        return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
    }
}
> sessionTokenApplicable: In Java V4 we also skip applying the session token when Eventual consistency is explicitly requested at the request level for data-plane operations. This behavior is not present in the V2 or .NET SDKs. If the consistency level is Eventual, there is no need to set the session token; this is intentional. Does this PR change this behaviour?
/**
 * Applies (or strips) the session-consistency token on the outgoing request.
 *
 * The token is applicable when the request explicitly asks for Session consistency,
 * or when the account default is Session — except for a document read that explicitly
 * downgrades to Eventual, where no token is needed. Master operations never carry one.
 * A pre-populated session-token header is honored as-is when applicable, and removed
 * when it is not.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");

    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    boolean explicitSessionConsistency =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString());
    // Document read explicitly downgraded to Eventual — the one case where a
    // Session-default account still skips the token.
    boolean explicitEventualDocumentRead =
        request.isReadOnlyRequest()
            && request.getResourceType() == ResourceType.Document
            && Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString());
    boolean sessionTokenApplicable =
        explicitSessionConsistency
            || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && !explicitEventualDocumentRead);
    boolean skipSessionToken =
        !sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType());

    String existingSessionToken = headers.get(HttpConstants.HttpHeaders.SESSION_TOKEN);
    if (!Strings.isNullOrEmpty(existingSessionToken)) {
        // Caller pre-set a token: keep it when applicable, otherwise strip it.
        if (skipSessionToken) {
            headers.remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }

    if (skipSessionToken) {
        return;
    }

    String resolvedSessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(resolvedSessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, resolvedSessionToken);
    }
}
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
/**
 * Applies (or strips) the session-consistency token on the outgoing request.
 *
 * The token is applicable when the request explicitly asks for Session consistency,
 * or when the account default is Session — except for a document read that explicitly
 * downgrades to Eventual (the negated disjunction below is NOT(read-only AND Document
 * AND Eventual) by De Morgan). Master operations never carry a session token.
 */
private void applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL);
    boolean sessionTokenApplicable =
        Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) ||
            (this.defaultConsistencyLevel == ConsistencyLevel.SESSION &&
                // skip when the request is an explicitly-Eventual document read
                (!request.isReadOnlyRequest() ||
                    request.getResourceType() != ResourceType.Document ||
                    !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString())));
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller pre-set a token: keep it when applicable, otherwise strip it.
        if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return;
    }
    if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
        return;
    }
    // Resolve the token tracked for this request's collection/partition; only set it when non-empty.
    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
    if (!Strings.isNullOrEmpty(sessionToken)) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
    }
}
/**
 * Gateway-mode store model: translates {@link RxDocumentServiceRequest}s into HTTPS calls
 * against the Cosmos DB Gateway endpoint and maps the responses back into
 * {@link RxDocumentServiceResponse}s, capturing session tokens along the way.
 */
class RxGatewayStoreModel implements RxStoreModel {
    // Shared sentinel for responses with no body; avoids a per-response allocation.
    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers sent on every request unless the request itself supplies the same header.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        if (defaultConsistencyLevel != null) {
            // Only advertise a default consistency level when one was configured.
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    // Thin dispatchers mapping each document operation onto the matching HTTP verb.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PATCH);
    }

    // Upserts are POSTs distinguished server-side via request headers.
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Issues a query (or query-plan) request, setting the query-specific headers
     * before delegating to {@link #performRequest}.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        if(request.getOperationType() != OperationType.QueryPlan) {
            // Query-plan requests are not flagged as queries; everything else is.
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }
        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request, creates a Mono which, upon subscription, issues the HTTP call
     * and emits one RxDocumentServiceResponse.
     *
     * @param request the document service request to send
     * @param method  the HTTP verb to use
     * @return a Mono emitting the single service response, or an error
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }
            URI uri = getUri(request);
            request.requestContext.resourcePhysicalAddress = uri.toString();
            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
            HttpRequest httpRequest = new HttpRequest(method,
                uri,
                uri.getPort(),
                httpHeaders,
                contentAsByteArray);
            // Query-plan and address-refresh calls get their own (typically shorter) timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }
            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous setup failures (e.g. bad URI) through the reactive chain.
            return Mono.error(e);
        }
    }

    /**
     * Merges the per-request headers over the model's default headers; per-request
     * values win and null values are normalized to empty strings.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // Defaults only apply when the request did not set the same header.
        // NOTE(review): headers is dereferenced here before the null check below,
        // so a null argument would NPE in this loop — confirm callers never pass null.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (!headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Resolves the absolute HTTPS URI for the request: endpoint override if present,
     * otherwise the endpoint chosen by the global endpoint manager, plus the resource path.
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // Media/attachment operations always go to the first write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }
        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            // The database-account endpoint is the service root itself.
            path = StringUtils.EMPTY;
        }
        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    // Guarantees a leading '/' on a non-null path; passes null through unchanged.
    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor-netty client response Mono into an RxDocumentServiceResponse Mono.
     *
     * The HTTP invocation is only made once the downstream subscriber subscribes;
     * HTTP error statuses are converted to CosmosExceptions by validateOrThrow, and
     * transport failures are wrapped and tagged with gateway sub-status codes.
     *
     * @param httpResponseMono the pending HTTP response
     * @param request          the originating request (used for diagnostics and error context)
     * @return {@link Mono} emitting the converted response or a CosmosException
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse -> {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();
            // Body may legitimately be empty; substitute the shared empty-array sentinel.
            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
            return contentObservable
                .map(content -> {
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    // Throws a CosmosException for error-range status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders, content);
                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // NOTE(review): this dereferences reactorNettyRequestRecord outside the
                    // null check above — would NPE if the record is ever null; confirm the
                    // record is always present in gateway mode.
                    DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                            request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();
        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
            .onErrorResume(throwable -> {
                Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
                if (!(unwrappedException instanceof Exception)) {
                    // Errors (OOM etc.) are not wrapped; propagate as-is.
                    logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                    return Mono.error(unwrappedException);
                }
                Exception exception = (Exception) unwrappedException;
                CosmosException dce;
                if (!(exception instanceof CosmosException)) {
                    // Transport-level failure: wrap it so downstream retry policies can classify it.
                    logger.error("Network failure", exception);
                    dce = BridgeInternal.createCosmosException(
                        request.requestContext.resourcePhysicalAddress, 0, exception);
                    BridgeInternal.setRequestHeaders(dce, request.getHeaders());
                } else {
                    dce = (CosmosException) exception;
                }
                if (WebExceptionUtility.isNetworkFailure(dce)) {
                    // Distinguish read timeouts from general endpoint unavailability.
                    if (WebExceptionUtility.isReadTimeoutException(dce)) {
                        BridgeInternal.setSubStatusCode(dce,
                            HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                    } else {
                        BridgeInternal.setSubStatusCode(dce,
                            HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                    }
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                        request, null, dce);
                    BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
                }
                return Mono.error(dce);
            });
    }

    /**
     * Throws a CosmosException when the HTTP status is in the gateway error range;
     * the exception carries the parsed CosmosError body, status text and request headers.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();
        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            // Re-wrap so the message always carries the status-code text.
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());
            CosmosException dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    // Routes the request to the per-operation handler based on its operation type.
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Patch:
                return this.patch(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in a retry policy for transient web exceptions.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point: applies the session token to the outgoing request, invokes it with
     * retries, and captures the session token from the response — including from
     * selected error responses (412/409, and 404 other than read-session-not-available)
     * on non-master resources, whose headers still carry usable session state.
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);
        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);
        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);
                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }
                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Updates the session container from the response headers; deleting a collection
     * instead clears all tokens for that collection's resource id.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete) {
            String resourceId;
            if (request.getIsNameBased()) {
                // Name-based requests carry the rid in the OWNER_ID response header.
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }

    // Master operations: master resources, non-execute sproc operations, and query plans.
    private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
        return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
            isStoredProcedureMasterOperation(resourceType, operationType) ||
            operationType == OperationType.QueryPlan;
    }

    // Any sproc operation other than executing it (CRUD on the sproc) is a master operation.
    private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
        return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
    }
}
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> 
upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = 
this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, 
HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, 
CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
could you add a test to validate when session token is set vs not set? How to add such test: You can use ReflectionUtils.getTransportClient() and ReflectionUtils.getTransportClient to register a spy to watch what is sent to the TransportClient. That way you can validate any header in the request including session token.
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) {
private void applySessionToken(RxDocumentServiceRequest request) { Map<String, String> headers = request.getHeaders(); Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null"); String requestConsistencyLevel = headers.get(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL); boolean sessionTokenApplicable = Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.SESSION.toString()) || (this.defaultConsistencyLevel == ConsistencyLevel.SESSION && (!request.isReadOnlyRequest() || request.getResourceType() != ResourceType.Document || !Strings.areEqual(requestConsistencyLevel, ConsistencyLevel.EVENTUAL.toString()))); if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) { if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } return; } if (!sessionTokenApplicable || isMasterOperation(request.getResourceType(), request.getOperationType())) { return; } String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request); if (!Strings.isNullOrEmpty(sessionToken)) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken); } }
class RxGatewayStoreModel implements RxStoreModel { private final static byte[] EMPTY_BYTE_ARRAY = {}; private final DiagnosticsClientContext clientContext; private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class); private final Map<String, String> defaultHeaders; private final HttpClient httpClient; private final QueryCompatibilityMode queryCompatibilityMode; private final GlobalEndpointManager globalEndpointManager; private ConsistencyLevel defaultConsistencyLevel; private ISessionContainer sessionContainer; public RxGatewayStoreModel( DiagnosticsClientContext clientContext, ISessionContainer sessionContainer, ConsistencyLevel defaultConsistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient) { this.clientContext = clientContext; this.defaultHeaders = new HashMap<>(); this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache"); this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION, HttpConstants.Versions.CURRENT_VERSION); if (userAgentContainer == null) { userAgentContainer = new UserAgentContainer(); } this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent()); if (defaultConsistencyLevel != null) { this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, defaultConsistencyLevel.toString()); } this.defaultConsistencyLevel = defaultConsistencyLevel; this.globalEndpointManager = globalEndpointManager; this.queryCompatibilityMode = queryCompatibilityMode; this.httpClient = httpClient; this.sessionContainer = sessionContainer; } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PATCH); } private Mono<RxDocumentServiceResponse> 
upsert(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.PUT); } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.DELETE); } private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.POST); } private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return this.performRequest(request, HttpMethod.GET); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { if(request.getOperationType() != OperationType.QueryPlan) { request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true"); } switch (this.queryCompatibilityMode) { case SqlQuery: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.SQL); break; case Default: case Query: default: request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.QUERY_JSON); break; } return this.performRequest(request, HttpMethod.POST); } /** * Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse. 
* * @param request * @param method * @return Flux<RxDocumentServiceResponse> */ public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) { try { if (request.requestContext.cosmosDiagnostics == null) { request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics(); } URI uri = getUri(request); request.requestContext.resourcePhysicalAddress = uri.toString(); HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders()); Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux(); HttpRequest httpRequest = new HttpRequest(method, uri, uri.getPort(), httpHeaders, contentAsByteArray); Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds()); if (OperationType.QueryPlan.equals(request.getOperationType())) { responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds()); } else if (request.isAddressRefresh()) { responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds()); } Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout); return toDocumentServiceResponse(httpResponseMono, request); } catch (Exception e) { return Mono.error(e); } } private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) { HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); for (Entry<String, String> entry : this.defaultHeaders.entrySet()) { if (!headers.containsKey(entry.getKey())) { httpHeaders.set(entry.getKey(), entry.getValue()); } } if (headers != null) { for (Entry<String, String> entry : headers.entrySet()) { if (entry.getValue() == null) { httpHeaders.set(entry.getKey(), ""); } else { httpHeaders.set(entry.getKey(), entry.getValue()); } } } return httpHeaders; } private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException { URI rootUri = request.getEndpointOverride(); if (rootUri == null) { if (request.getIsMedia()) { rootUri = 
this.globalEndpointManager.getWriteEndpoints().get(0); } else { rootUri = this.globalEndpointManager.resolveServiceEndpoint(request); } } String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed); if(request.getResourceType().equals(ResourceType.DatabaseAccount)) { path = StringUtils.EMPTY; } return new URI("https", null, rootUri.getHost(), rootUri.getPort(), ensureSlashPrefixed(path), null, null); } private String ensureSlashPrefixed(String path) { if (path == null) { return null; } if (path.startsWith("/")) { return path; } return "/" + path; } /** * Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable. * * * Once the customer code subscribes to the observable returned by the CRUD APIs, * the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made. * * @param httpResponseMono * @param request * @return {@link Mono} */ private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono, RxDocumentServiceRequest request) { return httpResponseMono.flatMap(httpResponse -> { HttpHeaders httpResponseHeaders = httpResponse.headers(); int httpResponseStatus = httpResponse.statusCode(); Mono<byte[]> contentObservable = httpResponse .bodyAsByteArray() .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY)); return contentObservable .map(content -> { ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord(); if (reactorNettyRequestRecord != null) { reactorNettyRequestRecord.setTimeCompleted(Instant.now()); BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics, reactorNettyRequestRecord.takeTimelineSnapshot()); } validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content); StoreResponse rsp = new StoreResponse(httpResponseStatus, 
HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()), content); DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot()); if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null); DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics); } return rsp; }) .single(); }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp)) .onErrorResume(throwable -> { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if (!(unwrappedException instanceof Exception)) { logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException); return Mono.error(unwrappedException); } Exception exception = (Exception) unwrappedException; CosmosException dce; if (!(exception instanceof CosmosException)) { logger.error("Network failure", exception); dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); } else { dce = (CosmosException) exception; } if (WebExceptionUtility.isNetworkFailure(dce)) { if (WebExceptionUtility.isReadTimeoutException(dce)) { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT); } else { BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE); } } if (request.requestContext.cosmosDiagnostics != null) { BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce); BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics); } return Mono.error(dce); }); } private void validateOrThrow(RxDocumentServiceRequest request, HttpResponseStatus status, HttpHeaders headers, byte[] bodyAsBytes) { int statusCode = status.code(); if (statusCode >= 
HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) { String statusCodeString = status.reasonPhrase() != null ? status.reasonPhrase().replace(" ", "") : ""; String body = bodyAsBytes != null ? new String(bodyAsBytes) : null; CosmosError cosmosError; cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError(); cosmosError = new CosmosError(statusCodeString, String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString), cosmosError.getPartitionedQueryExecutionInfo()); CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap()); BridgeInternal.setRequestHeaders(dce, request.getHeaders()); throw dce; } } private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) { switch (request.getOperationType()) { case Create: case Batch: return this.create(request); case Patch: return this.patch(request); case Upsert: return this.upsert(request); case Delete: return this.delete(request); case ExecuteJavaScript: return this.execute(request); case Read: return this.read(request); case ReadFeed: return this.readFeed(request); case Replace: return this.replace(request); case SqlQuery: case Query: case QueryPlan: return this.query(request); default: throw new IllegalStateException("Unknown operation setType " + request.getOperationType()); } } private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) { Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single(); return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy()); } @Override public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) { this.applySessionToken(request); Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request); return responseObs.onErrorResume( e -> { CosmosException dce = Utils.as(e, 
CosmosException.class); if (dce == null) { logger.error("unexpected failure {}", e.getMessage(), e); return Mono.error(e); } if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) && (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED || dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT || ( dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND && !Exceptions.isSubStatusCode(dce, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) { this.captureSessionToken(request, dce.getResponseHeaders()); } return Mono.error(dce); } ).map(response -> { this.captureSessionToken(request, response.getResponseHeaders()); return response; } ); } private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) { if (request.getResourceType() == ResourceType.DocumentCollection && request.getOperationType() == OperationType.Delete) { String resourceId; if (request.getIsNameBased()) { resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID); } else { resourceId = request.getResourceId(); } this.sessionContainer.clearTokenByResourceId(resourceId); } else { this.sessionContainer.setSessionToken(request, responseHeaders); } } private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) { return ReplicatedResourceClientUtils.isMasterResource(resourceType) || isStoredProcedureMasterOperation(resourceType, operationType) || operationType == OperationType.QueryPlan; } private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) { return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript; } }
/**
 * Store model that routes document-service requests through the Cosmos DB gateway over
 * HTTP: it translates an {@link RxDocumentServiceRequest} into an HTTP call, converts
 * the HTTP response back into an {@link RxDocumentServiceResponse}, and maintains the
 * session-token container used for session consistency.
 */
class RxGatewayStoreModel implements RxStoreModel {
    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers applied to every request unless the request supplies its own value.
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;

    public RxGatewayStoreModel(
            DiagnosticsClientContext clientContext,
            ISessionContainer sessionContainer,
            ConsistencyLevel defaultConsistencyLevel,
            QueryCompatibilityMode queryCompatibilityMode,
            UserAgentContainer userAgentContainer,
            GlobalEndpointManager globalEndpointManager,
            HttpClient httpClient) {
        this.clientContext = clientContext;
        this.defaultHeaders = new HashMap<>();
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL, "no-cache");
        this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
            HttpConstants.Versions.CURRENT_VERSION);
        if (userAgentContainer == null) {
            userAgentContainer = new UserAgentContainer();
        }
        this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
        // Only pin a consistency level header when one was explicitly configured.
        if (defaultConsistencyLevel != null) {
            this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
                defaultConsistencyLevel.toString());
        }
        this.defaultConsistencyLevel = defaultConsistencyLevel;
        this.globalEndpointManager = globalEndpointManager;
        this.queryCompatibilityMode = queryCompatibilityMode;
        this.httpClient = httpClient;
        this.sessionContainer = sessionContainer;
    }

    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PATCH);
    }

    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.PUT);
    }

    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.DELETE);
    }

    private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.POST);
    }

    private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return this.performRequest(request, HttpMethod.GET);
    }

    /**
     * Issues a query (or query-plan) request, setting the content type required by the
     * configured {@link QueryCompatibilityMode}.
     */
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // Query-plan requests are not marked as queries.
        if(request.getOperationType() != OperationType.QueryPlan) {
            request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
        }

        switch (this.queryCompatibilityMode) {
            case SqlQuery:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.SQL);
                break;
            case Default:
            case Query:
            default:
                request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
                    RuntimeConstants.MediaTypes.QUERY_JSON);
                break;
        }
        return this.performRequest(request, HttpMethod.POST);
    }

    /**
     * Given the request it creates a Flux which upon subscription issues the HTTP call
     * and emits one RxDocumentServiceResponse.
     *
     * @param request the document service request
     * @param method  the HTTP verb to use
     * @return Mono&lt;RxDocumentServiceResponse&gt;
     */
    public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
        try {
            if (request.requestContext.cosmosDiagnostics == null) {
                request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
            }

            URI uri = getUri(request);
            request.requestContext.resourcePhysicalAddress = uri.toString();

            HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());

            Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();

            HttpRequest httpRequest = new HttpRequest(method,
                uri,
                uri.getPort(),
                httpHeaders,
                contentAsByteArray);

            // Query-plan and address-refresh calls get their own, typically shorter,
            // response timeouts.
            Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
            if (OperationType.QueryPlan.equals(request.getOperationType())) {
                responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
            } else if (request.isAddressRefresh()) {
                responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
            }

            Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);

            return toDocumentServiceResponse(httpResponseMono, request);
        } catch (Exception e) {
            // Surface synchronous failures through the reactive pipeline.
            return Mono.error(e);
        }
    }

    /**
     * Merges the model's default headers with the request's own headers; request
     * headers win. A null request value is sent as an empty string.
     */
    private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
        HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
        // BUGFIX: previously headers.containsKey(...) ran before the null check below,
        // throwing NPE for a null map the second branch explicitly tolerates.
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            if (headers == null || !headers.containsKey(entry.getKey())) {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }

        if (headers != null) {
            for (Entry<String, String> entry : headers.entrySet()) {
                if (entry.getValue() == null) {
                    httpHeaders.set(entry.getKey(), "");
                } else {
                    httpHeaders.set(entry.getKey(), entry.getValue());
                }
            }
        }
        return httpHeaders;
    }

    /**
     * Builds the target URI for the request: endpoint override if present, otherwise
     * the resolved service endpoint (first write endpoint for media requests).
     *
     * @throws URISyntaxException if the assembled URI is malformed
     */
    private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
        URI rootUri = request.getEndpointOverride();
        if (rootUri == null) {
            if (request.getIsMedia()) {
                // For media read request, always use the write endpoint.
                rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
            } else {
                rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
            }
        }

        String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
        // DatabaseAccount requests hit the service root.
        if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
            path = StringUtils.EMPTY;
        }

        return new URI("https",
            null,
            rootUri.getHost(),
            rootUri.getPort(),
            ensureSlashPrefixed(path),
            null,
            null);
    }

    private String ensureSlashPrefixed(String path) {
        if (path == null) {
            return null;
        }
        if (path.startsWith("/")) {
            return path;
        }
        return "/" + path;
    }

    /**
     * Transforms the reactor netty's client response Observable to
     * RxDocumentServiceResponse Observable.
     *
     * Once the customer code subscribes to the observable returned by the CRUD APIs,
     * the subscription goes up till it reaches the source reactor netty's observable,
     * and at that point the HTTP invocation will be made.
     *
     * @param httpResponseMono the raw HTTP response
     * @param request          the originating request (for diagnostics and error context)
     * @return {@link Mono} emitting the converted response, or a CosmosException on failure
     */
    private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                      RxDocumentServiceRequest request) {
        return httpResponseMono.flatMap(httpResponse ->  {
            HttpHeaders httpResponseHeaders = httpResponse.headers();
            int httpResponseStatus = httpResponse.statusCode();

            Mono<byte[]> contentObservable = httpResponse
                .bodyAsByteArray()
                .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));

            return contentObservable
                .map(content -> {
                    // The request record may be absent for clients that do not attach
                    // a ReactorNettyRequestRecord.
                    ReactorNettyRequestRecord reactorNettyRequestRecord =
                        httpResponse.request().reactorNettyRequestRecord();
                    if (reactorNettyRequestRecord != null) {
                        reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                        BridgeInternal.setTransportClientRequestTimelineOnDiagnostics(
                            request.requestContext.cosmosDiagnostics,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }

                    // Throws CosmosException on error status codes.
                    validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus),
                        httpResponseHeaders, content);

                    StoreResponse rsp = new StoreResponse(httpResponseStatus,
                        HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
                        content);
                    // BUGFIX: this call previously dereferenced reactorNettyRequestRecord
                    // unconditionally, throwing NPE whenever no record was attached even
                    // though the guard above already anticipates that case.
                    if (reactorNettyRequestRecord != null) {
                        DirectBridgeInternal.setRequestTimeline(rsp,
                            reactorNettyRequestRecord.takeTimelineSnapshot());
                    }
                    if (request.requestContext.cosmosDiagnostics != null) {
                        BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                            request, rsp, null);
                        DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
                    }
                    return rsp;
                })
                .single();

        }).map(rsp -> new RxDocumentServiceResponse(this.clientContext, rsp))
          .onErrorResume(throwable -> {
              Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
              if (!(unwrappedException instanceof Exception)) {
                  // Errors (OOM etc.) are not wrapped — propagate as-is.
                  logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
                  return Mono.error(unwrappedException);
              }

              Exception exception = (Exception) unwrappedException;
              CosmosException dce;
              if (!(exception instanceof CosmosException)) {
                  // Wrap non-Cosmos failures (typically network errors) so callers see
                  // a uniform exception type.
                  logger.error("Network failure", exception);
                  dce = BridgeInternal.createCosmosException(
                      request.requestContext.resourcePhysicalAddress, 0, exception);
                  BridgeInternal.setRequestHeaders(dce, request.getHeaders());
              } else {
                  dce = (CosmosException) exception;
              }

              if (WebExceptionUtility.isNetworkFailure(dce)) {
                  // Distinguish read timeouts from general endpoint unavailability.
                  if (WebExceptionUtility.isReadTimeoutException(dce)) {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
                  } else {
                      BridgeInternal.setSubStatusCode(dce,
                          HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
                  }
              }

              if (request.requestContext.cosmosDiagnostics != null) {
                  BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics,
                      request, null, dce);
                  BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
              }

              return Mono.error(dce);
          });
    }

    /**
     * Converts any gateway response whose status is at or above the error threshold
     * into a CosmosException and throws it; returns normally otherwise.
     */
    private void validateOrThrow(RxDocumentServiceRequest request,
                                 HttpResponseStatus status,
                                 HttpHeaders headers,
                                 byte[] bodyAsBytes) {
        int statusCode = status.code();

        if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
            // The reason phrase with spaces stripped doubles as the error "code".
            String statusCodeString = status.reasonPhrase() != null
                ? status.reasonPhrase().replace(" ", "")
                : "";

            // NOTE(review): new String(bodyAsBytes) uses the platform default charset —
            // TODO confirm UTF-8 is always intended for gateway error bodies.
            String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
            CosmosError cosmosError;
            cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
            // Re-wrap so the message always carries the status code while preserving
            // any partitioned-query execution info from the original error payload.
            cosmosError = new CosmosError(statusCodeString,
                String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
                cosmosError.getPartitionedQueryExecutionInfo());

            CosmosException dce = BridgeInternal.createCosmosException(
                request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
            throw dce;
        }
    }

    /**
     * Dispatches the request to the HTTP-verb helper matching its operation type.
     *
     * @throws IllegalStateException for operation types this store model does not handle
     */
    private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
        switch (request.getOperationType()) {
            case Create:
            case Batch:
                return this.create(request);
            case Patch:
                return this.patch(request);
            case Upsert:
                return this.upsert(request);
            case Delete:
                return this.delete(request);
            case ExecuteJavaScript:
                return this.execute(request);
            case Read:
                return this.read(request);
            case ReadFeed:
                return this.readFeed(request);
            case Replace:
                return this.replace(request);
            case SqlQuery:
            case Query:
            case QueryPlan:
                return this.query(request);
            default:
                throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
        }
    }

    // Wraps the dispatch in backoff-retry so transient web exceptions are retried;
    // .single() asserts exactly one response per attempt.
    private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
        Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
        return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy());
    }

    /**
     * Entry point: stamps the session token onto the request, invokes it with retry,
     * and captures the returned session token on success and on selected failures so
     * session consistency stays current.
     */
    @Override
    public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
        this.applySessionToken(request);

        Mono<RxDocumentServiceResponse> responseObs = invokeAsync(request);

        return responseObs.onErrorResume(
            e -> {
                CosmosException dce = Utils.as(e, CosmosException.class);

                if (dce == null) {
                    logger.error("unexpected failure {}", e.getMessage(), e);
                    return Mono.error(e);
                }

                // For non-master resources, these specific failures still carry a usable
                // session token (412, 409, and 404 unless the session was simply not
                // available yet) — capture it before propagating the error.
                if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                    (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                        dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                        (
                            dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                                !Exceptions.isSubStatusCode(dce,
                                    HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                    this.captureSessionToken(request, dce.getResponseHeaders());
                }

                return Mono.error(dce);
            }
        ).map(response -> {
                this.captureSessionToken(request, response.getResponseHeaders());
                return response;
            }
        );
    }

    /**
     * Records the session token from a response, or clears all tokens for a collection
     * when the collection itself was deleted.
     */
    private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
        if (request.getResourceType() == ResourceType.DocumentCollection &&
            request.getOperationType() == OperationType.Delete) {
            String resourceId;
            // Name-based deletes do not carry the rid on the request; read it from the
            // OWNER_ID response header instead.
            if (request.getIsNameBased()) {
                resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
            } else {
                resourceId = request.getResourceId();
            }
            this.sessionContainer.clearTokenByResourceId(resourceId);
        } else {
            this.sessionContainer.setSessionToken(request, responseHeaders);
        }
    }

    // True when the operation targets a master (metadata) resource. QueryPlan is
    // always treated as a master operation.
    private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
        return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
            isStoredProcedureMasterOperation(resourceType, operationType) ||
            operationType == OperationType.QueryPlan;
    }

    // Stored-procedure CRUD is a master operation; executing one is not.
    private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
        return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
    }
}